2nd step: new backwards tests from branch. Once 3.4 is released:
- copy the final JAR file to the backwards/lib folder
- svn merge any updates to the 3.4.0 core tests and test-framework (which may get committed during the release phase) into the backwards/src folder

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/branches/branch_3x@1166714 13f79535-47bb-0310-9956-ffa450edef68
diff --git a/lucene/backwards/lib/lucene-core-3.4.0RC0.jar b/lucene/backwards/lib/lucene-core-3.4.0RC0.jar
new file mode 100644
index 0000000..bf88cbf
--- /dev/null
+++ b/lucene/backwards/lib/lucene-core-3.4.0RC0.jar
Binary files differ
diff --git a/lucene/backwards/src/test-framework/org/apache/lucene/analysis/BaseTokenStreamTestCase.java b/lucene/backwards/src/test-framework/org/apache/lucene/analysis/BaseTokenStreamTestCase.java
new file mode 100644
index 0000000..db82596
--- /dev/null
+++ b/lucene/backwards/src/test-framework/org/apache/lucene/analysis/BaseTokenStreamTestCase.java
@@ -0,0 +1,334 @@
+package org.apache.lucene.analysis;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.StringReader;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Random;
+ 
+import org.apache.lucene.analysis.tokenattributes.*;
+import org.apache.lucene.util.Attribute;
+import org.apache.lucene.util.AttributeImpl;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util._TestUtil;
+
+/** 
+ * Base class for all Lucene unit tests that use TokenStreams. 
+ * <p>
+ * When writing unit tests for analysis components, it's highly recommended
+ * to use the helper methods here (especially in conjunction with {@link MockAnalyzer} or
+ * {@link MockTokenizer}), as they contain many assertions and checks to 
+ * catch bugs.
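+ * <p>
+ * For example, a typical test might look like this (an illustrative sketch only;
+ * the input text and expected tokens/offsets are made up):
+ * <pre>
+ *   Analyzer a = new MockAnalyzer(random);
+ *   assertAnalyzesTo(a, "Foo Bar", new String[] { "foo", "bar" },
+ *                    new int[] { 0, 4 },    // start offsets
+ *                    new int[] { 3, 7 });   // end offsets
+ *   checkRandomData(random, a, 1000);       // also blast it with random text
+ * </pre>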
+ * 
+ * @see MockAnalyzer
+ * @see MockTokenizer
+ */
+public abstract class BaseTokenStreamTestCase extends LuceneTestCase {
+  // some helpers to test Analyzers and TokenStreams:
+  
+  public static interface CheckClearAttributesAttribute extends Attribute {
+    boolean getAndResetClearCalled();
+  }
+
+  public static final class CheckClearAttributesAttributeImpl extends AttributeImpl implements CheckClearAttributesAttribute {
+    private boolean clearCalled = false;
+    
+    public boolean getAndResetClearCalled() {
+      try {
+        return clearCalled;
+      } finally {
+        clearCalled = false;
+      }
+    }
+
+    @Override
+    public void clear() {
+      clearCalled = true;
+    }
+
+    @Override
+    public boolean equals(Object other) {
+      return (
+        other instanceof CheckClearAttributesAttributeImpl &&
+        ((CheckClearAttributesAttributeImpl) other).clearCalled == this.clearCalled
+      );
+    }
+
+    @Override
+    public int hashCode() {
+      return 76137213 ^ Boolean.valueOf(clearCalled).hashCode();
+    }
+    
+    @Override
+    public void copyTo(AttributeImpl target) {
+      ((CheckClearAttributesAttributeImpl) target).clear();
+    }
+  }
+
+  public static void assertTokenStreamContents(TokenStream ts, String[] output, int startOffsets[], int endOffsets[], String types[], int posIncrements[], Integer finalOffset) throws IOException {
+    assertNotNull(output);
+    CheckClearAttributesAttribute checkClearAtt = ts.addAttribute(CheckClearAttributesAttribute.class);
+    
+    assertTrue("has no CharTermAttribute", ts.hasAttribute(CharTermAttribute.class));
+    CharTermAttribute termAtt = ts.getAttribute(CharTermAttribute.class);
+    
+    OffsetAttribute offsetAtt = null;
+    if (startOffsets != null || endOffsets != null || finalOffset != null) {
+      assertTrue("has no OffsetAttribute", ts.hasAttribute(OffsetAttribute.class));
+      offsetAtt = ts.getAttribute(OffsetAttribute.class);
+    }
+    
+    TypeAttribute typeAtt = null;
+    if (types != null) {
+      assertTrue("has no TypeAttribute", ts.hasAttribute(TypeAttribute.class));
+      typeAtt = ts.getAttribute(TypeAttribute.class);
+    }
+    
+    PositionIncrementAttribute posIncrAtt = null;
+    if (posIncrements != null) {
+      assertTrue("has no PositionIncrementAttribute", ts.hasAttribute(PositionIncrementAttribute.class));
+      posIncrAtt = ts.getAttribute(PositionIncrementAttribute.class);
+    }
+    
+    ts.reset();
+    for (int i = 0; i < output.length; i++) {
+      // extra safety to enforce that the state is not preserved; also assign bogus values
+      ts.clearAttributes();
+      termAtt.setEmpty().append("bogusTerm");
+      if (offsetAtt != null) offsetAtt.setOffset(14584724,24683243);
+      if (typeAtt != null) typeAtt.setType("bogusType");
+      if (posIncrAtt != null) posIncrAtt.setPositionIncrement(45987657);
+      
+      checkClearAtt.getAndResetClearCalled(); // reset it, because we called clearAttributes() before
+      assertTrue("token "+i+" does not exist", ts.incrementToken());
+      assertTrue("clearAttributes() was not called correctly in TokenStream chain", checkClearAtt.getAndResetClearCalled());
+      
+      assertEquals("term "+i, output[i], termAtt.toString());
+      if (startOffsets != null)
+        assertEquals("startOffset "+i, startOffsets[i], offsetAtt.startOffset());
+      if (endOffsets != null)
+        assertEquals("endOffset "+i, endOffsets[i], offsetAtt.endOffset());
+      if (types != null)
+        assertEquals("type "+i, types[i], typeAtt.type());
+      if (posIncrements != null)
+        assertEquals("posIncrement "+i, posIncrements[i], posIncrAtt.getPositionIncrement());
+      
+      // we can enforce some basic things about a few attributes even if the caller doesn't check:
+      if (offsetAtt != null) {
+        assertTrue("startOffset must be >= 0", offsetAtt.startOffset() >= 0);
+        assertTrue("endOffset must be >= 0", offsetAtt.endOffset() >= 0);
+        assertTrue("endOffset must be >= startOffset", offsetAtt.endOffset() >= offsetAtt.startOffset());
+      }
+      if (posIncrAtt != null) {
+        assertTrue("posIncrement must be >= 0", posIncrAtt.getPositionIncrement() >= 0);
+      }
+    }
+    assertFalse("end of stream", ts.incrementToken());
+    ts.end();
+    if (finalOffset != null)
+      assertEquals("finalOffset ", finalOffset.intValue(), offsetAtt.endOffset());
+    if (offsetAtt != null) {
+      assertTrue("finalOffset must be >= 0", offsetAtt.endOffset() >= 0);
+    }
+    ts.close();
+  }
+  
+  public static void assertTokenStreamContents(TokenStream ts, String[] output, int startOffsets[], int endOffsets[], String types[], int posIncrements[]) throws IOException {
+    assertTokenStreamContents(ts, output, startOffsets, endOffsets, types, posIncrements, null);
+  }
+
+  public static void assertTokenStreamContents(TokenStream ts, String[] output) throws IOException {
+    assertTokenStreamContents(ts, output, null, null, null, null, null);
+  }
+  
+  public static void assertTokenStreamContents(TokenStream ts, String[] output, String[] types) throws IOException {
+    assertTokenStreamContents(ts, output, null, null, types, null, null);
+  }
+  
+  public static void assertTokenStreamContents(TokenStream ts, String[] output, int[] posIncrements) throws IOException {
+    assertTokenStreamContents(ts, output, null, null, null, posIncrements, null);
+  }
+  
+  public static void assertTokenStreamContents(TokenStream ts, String[] output, int startOffsets[], int endOffsets[]) throws IOException {
+    assertTokenStreamContents(ts, output, startOffsets, endOffsets, null, null, null);
+  }
+  
+  public static void assertTokenStreamContents(TokenStream ts, String[] output, int startOffsets[], int endOffsets[], Integer finalOffset) throws IOException {
+    assertTokenStreamContents(ts, output, startOffsets, endOffsets, null, null, finalOffset);
+  }
+  
+  public static void assertTokenStreamContents(TokenStream ts, String[] output, int startOffsets[], int endOffsets[], int[] posIncrements) throws IOException {
+    assertTokenStreamContents(ts, output, startOffsets, endOffsets, null, posIncrements, null);
+  }
+
+  public static void assertTokenStreamContents(TokenStream ts, String[] output, int startOffsets[], int endOffsets[], int[] posIncrements, Integer finalOffset) throws IOException {
+    assertTokenStreamContents(ts, output, startOffsets, endOffsets, null, posIncrements, finalOffset);
+  }
+  
+  public static void assertAnalyzesTo(Analyzer a, String input, String[] output, int startOffsets[], int endOffsets[], String types[], int posIncrements[]) throws IOException {
+    assertTokenStreamContents(a.tokenStream("dummy", new StringReader(input)), output, startOffsets, endOffsets, types, posIncrements, input.length());
+  }
+  
+  public static void assertAnalyzesTo(Analyzer a, String input, String[] output) throws IOException {
+    assertAnalyzesTo(a, input, output, null, null, null, null);
+  }
+  
+  public static void assertAnalyzesTo(Analyzer a, String input, String[] output, String[] types) throws IOException {
+    assertAnalyzesTo(a, input, output, null, null, types, null);
+  }
+  
+  public static void assertAnalyzesTo(Analyzer a, String input, String[] output, int[] posIncrements) throws IOException {
+    assertAnalyzesTo(a, input, output, null, null, null, posIncrements);
+  }
+  
+  public static void assertAnalyzesTo(Analyzer a, String input, String[] output, int startOffsets[], int endOffsets[]) throws IOException {
+    assertAnalyzesTo(a, input, output, startOffsets, endOffsets, null, null);
+  }
+  
+  public static void assertAnalyzesTo(Analyzer a, String input, String[] output, int startOffsets[], int endOffsets[], int[] posIncrements) throws IOException {
+    assertAnalyzesTo(a, input, output, startOffsets, endOffsets, null, posIncrements);
+  }
+  
+
+  public static void assertAnalyzesToReuse(Analyzer a, String input, String[] output, int startOffsets[], int endOffsets[], String types[], int posIncrements[]) throws IOException {
+    assertTokenStreamContents(a.reusableTokenStream("dummy", new StringReader(input)), output, startOffsets, endOffsets, types, posIncrements, input.length());
+  }
+  
+  public static void assertAnalyzesToReuse(Analyzer a, String input, String[] output) throws IOException {
+    assertAnalyzesToReuse(a, input, output, null, null, null, null);
+  }
+  
+  public static void assertAnalyzesToReuse(Analyzer a, String input, String[] output, String[] types) throws IOException {
+    assertAnalyzesToReuse(a, input, output, null, null, types, null);
+  }
+  
+  public static void assertAnalyzesToReuse(Analyzer a, String input, String[] output, int[] posIncrements) throws IOException {
+    assertAnalyzesToReuse(a, input, output, null, null, null, posIncrements);
+  }
+  
+  public static void assertAnalyzesToReuse(Analyzer a, String input, String[] output, int startOffsets[], int endOffsets[]) throws IOException {
+    assertAnalyzesToReuse(a, input, output, startOffsets, endOffsets, null, null);
+  }
+  
+  public static void assertAnalyzesToReuse(Analyzer a, String input, String[] output, int startOffsets[], int endOffsets[], int[] posIncrements) throws IOException {
+    assertAnalyzesToReuse(a, input, output, startOffsets, endOffsets, null, posIncrements);
+  }
+
+  // simple utility methods for testing stemmers
+  
+  public static void checkOneTerm(Analyzer a, final String input, final String expected) throws IOException {
+    assertAnalyzesTo(a, input, new String[]{expected});
+  }
+  
+  public static void checkOneTermReuse(Analyzer a, final String input, final String expected) throws IOException {
+    assertAnalyzesToReuse(a, input, new String[]{expected});
+  }
+  
+  // simple utility methods for blasting tokenstreams with data to make sure they don't do anything crazy
+
+  public static void checkRandomData(Random random, Analyzer a, int iterations) throws IOException {
+    checkRandomData(random, a, iterations, 20);
+  }
+
+  public static void checkRandomData(Random random, Analyzer a, int iterations, int maxWordLength) throws IOException {
+    for (int i = 0; i < iterations; i++) {
+      String text;
+      switch(_TestUtil.nextInt(random, 0, 3)) {
+        case 0: 
+          text = _TestUtil.randomSimpleString(random);
+          break;
+        case 1:
+          text = _TestUtil.randomRealisticUnicodeString(random, maxWordLength);
+          break;
+        default:
+          text = _TestUtil.randomUnicodeString(random, maxWordLength);
+      }
+
+      if (VERBOSE) {
+        System.out.println("NOTE: BaseTokenStreamTestCase: get first token stream now text=" + text);
+      }
+
+      TokenStream ts = a.reusableTokenStream("dummy", new StringReader(text));
+      assertTrue("has no CharTermAttribute", ts.hasAttribute(CharTermAttribute.class));
+      CharTermAttribute termAtt = ts.getAttribute(CharTermAttribute.class);
+      OffsetAttribute offsetAtt = ts.hasAttribute(OffsetAttribute.class) ? ts.getAttribute(OffsetAttribute.class) : null;
+      PositionIncrementAttribute posIncAtt = ts.hasAttribute(PositionIncrementAttribute.class) ? ts.getAttribute(PositionIncrementAttribute.class) : null;
+      TypeAttribute typeAtt = ts.hasAttribute(TypeAttribute.class) ? ts.getAttribute(TypeAttribute.class) : null;
+      List<String> tokens = new ArrayList<String>();
+      List<String> types = new ArrayList<String>();
+      List<Integer> positions = new ArrayList<Integer>();
+      List<Integer> startOffsets = new ArrayList<Integer>();
+      List<Integer> endOffsets = new ArrayList<Integer>();
+      ts.reset();
+      while (ts.incrementToken()) {
+        tokens.add(termAtt.toString());
+        if (typeAtt != null) types.add(typeAtt.type());
+        if (posIncAtt != null) positions.add(posIncAtt.getPositionIncrement());
+        if (offsetAtt != null) {
+          startOffsets.add(offsetAtt.startOffset());
+          endOffsets.add(offsetAtt.endOffset());
+        }
+      }
+      ts.end();
+      ts.close();
+      // verify reusing is "reproducible" and also get the normal tokenstream sanity checks
+      if (!tokens.isEmpty()) {
+        if (VERBOSE) {
+          System.out.println("NOTE: BaseTokenStreamTestCase: re-run analysis");
+        }
+        if (typeAtt != null && posIncAtt != null && offsetAtt != null) {
+          // offset + pos + type
+          assertAnalyzesToReuse(a, text, 
+            tokens.toArray(new String[tokens.size()]),
+            toIntArray(startOffsets),
+            toIntArray(endOffsets),
+            types.toArray(new String[types.size()]),
+            toIntArray(positions));
+        } else if (posIncAtt != null && offsetAtt != null) {
+          // offset + pos
+          assertAnalyzesToReuse(a, text, 
+              tokens.toArray(new String[tokens.size()]),
+              toIntArray(startOffsets),
+              toIntArray(endOffsets),
+              toIntArray(positions));
+        } else if (offsetAtt != null) {
+          // offset
+          assertAnalyzesToReuse(a, text, 
+              tokens.toArray(new String[tokens.size()]),
+              toIntArray(startOffsets),
+              toIntArray(endOffsets));
+        } else {
+          // terms only
+          assertAnalyzesToReuse(a, text, 
+              tokens.toArray(new String[tokens.size()]));
+        }
+      }
+    }
+  }
+  
+  static int[] toIntArray(List<Integer> list) {
+    int ret[] = new int[list.size()];
+    int offset = 0;
+    for (Integer i : list) {
+      ret[offset++] = i;
+    }
+    return ret;
+  }
+}
diff --git a/lucene/backwards/src/test-framework/org/apache/lucene/analysis/CollationTestBase.java b/lucene/backwards/src/test-framework/org/apache/lucene/analysis/CollationTestBase.java
new file mode 100644
index 0000000..8fb540a
--- /dev/null
+++ b/lucene/backwards/src/test-framework/org/apache/lucene/analysis/CollationTestBase.java
@@ -0,0 +1,316 @@
+package org.apache.lucene.analysis;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.PerFieldAnalyzerWrapper;
+import org.apache.lucene.analysis.WhitespaceAnalyzer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.TermRangeFilter;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.TermRangeQuery;
+import org.apache.lucene.search.Searcher;
+import org.apache.lucene.search.Sort;
+import org.apache.lucene.search.SortField;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.util.IndexableBinaryStringTools;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util._TestUtil;
+
+import java.io.IOException;
+import java.io.StringReader;
+import java.util.HashMap;
+import java.util.Map;
+
+public abstract class CollationTestBase extends LuceneTestCase {
+
+  protected String firstRangeBeginningOriginal = "\u062F";
+  protected String firstRangeEndOriginal = "\u0698";
+  
+  protected String secondRangeBeginningOriginal = "\u0633";
+  protected String secondRangeEndOriginal = "\u0638";
+  
+  /**
+   * Convenience method to perform the same function as CollationKeyFilter.
+   *  
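+   * <p>
+   * For example (an illustrative sketch; the locale and term are arbitrary):
+   * <pre>
+   *   Collator collator = Collator.getInstance(new Locale("fa"));
+   *   byte[] keyBits = collator.getCollationKey("\u0633\u0627\u0628").toByteArray();
+   *   String indexableKey = encodeCollationKey(keyBits);
+   * </pre>
+   *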
+   * @param keyBits the result from 
+   *  collator.getCollationKey(original).toByteArray()
+   * @return The encoded collation key for the original String
+   */
+  protected String encodeCollationKey(byte[] keyBits) {
+    // Ensure that the backing char[] array is large enough to hold the encoded
+    // Binary String
+    int encodedLength = IndexableBinaryStringTools.getEncodedLength(keyBits, 0, keyBits.length);
+    char[] encodedBegArray = new char[encodedLength];
+    IndexableBinaryStringTools.encode(keyBits, 0, keyBits.length, encodedBegArray, 0, encodedLength);
+    return new String(encodedBegArray);
+  }
+    
+  public void testFarsiRangeFilterCollating(Analyzer analyzer, String firstBeg, 
+                                            String firstEnd, String secondBeg,
+                                            String secondEnd) throws Exception {
+    RAMDirectory ramDir = new RAMDirectory();
+    IndexWriter writer = new IndexWriter(ramDir, new IndexWriterConfig(
+        TEST_VERSION_CURRENT, analyzer));
+    Document doc = new Document();
+    doc.add(new Field("content", "\u0633\u0627\u0628", 
+                      Field.Store.YES, Field.Index.ANALYZED));
+    doc.add(new Field("body", "body",
+                      Field.Store.YES, Field.Index.NOT_ANALYZED));
+    writer.addDocument(doc);
+    writer.close();
+    IndexSearcher searcher = new IndexSearcher(ramDir, true);
+    Query query = new TermQuery(new Term("body","body"));
+
+    // Unicode order would include U+0633 in [ U+062F - U+0698 ], but Farsi
+    // orders the U+0698 character before the U+0633 character, so the single
+    // index Term below should NOT be returned by a TermRangeFilter with a Farsi
+    // Collator (or an Arabic one for the case when Farsi is not
+    // supported).
+    ScoreDoc[] result = searcher.search
+      (query, new TermRangeFilter("content", firstBeg, firstEnd, true, true), 1).scoreDocs;
+    assertEquals("The index Term should not be included.", 0, result.length);
+
+    result = searcher.search
+      (query, new TermRangeFilter("content", secondBeg, secondEnd, true, true), 1).scoreDocs;
+    assertEquals("The index Term should be included.", 1, result.length);
+
+    searcher.close();
+  }
+ 
+  public void testFarsiRangeQueryCollating(Analyzer analyzer, String firstBeg, 
+                                            String firstEnd, String secondBeg,
+                                            String secondEnd) throws Exception {
+    RAMDirectory ramDir = new RAMDirectory();
+    IndexWriter writer = new IndexWriter(ramDir, new IndexWriterConfig(
+        TEST_VERSION_CURRENT, analyzer));
+    Document doc = new Document();
+
+    // Unicode order would include U+0633 in [ U+062F - U+0698 ], but Farsi
+    // orders the U+0698 character before the U+0633 character, so the single
+    // index Term below should NOT be returned by a TermRangeQuery with a Farsi
+    // Collator (or an Arabic one for the case when Farsi is not supported).
+    doc.add(new Field("content", "\u0633\u0627\u0628", 
+                      Field.Store.YES, Field.Index.ANALYZED));
+    writer.addDocument(doc);
+    writer.close();
+    IndexSearcher searcher = new IndexSearcher(ramDir, true);
+
+    Query query = new TermRangeQuery("content", firstBeg, firstEnd, true, true);
+    ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals("The index Term should not be included.", 0, hits.length);
+
+    query = new TermRangeQuery("content", secondBeg, secondEnd, true, true);
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals("The index Term should be included.", 1, hits.length);
+    searcher.close();
+  }
+
+  public void testFarsiTermRangeQuery(Analyzer analyzer, String firstBeg,
+      String firstEnd, String secondBeg, String secondEnd) throws Exception {
+
+    RAMDirectory farsiIndex = new RAMDirectory();
+    IndexWriter writer = new IndexWriter(farsiIndex, new IndexWriterConfig(
+        TEST_VERSION_CURRENT, analyzer));
+    Document doc = new Document();
+    doc.add(new Field("content", "\u0633\u0627\u0628", 
+                      Field.Store.YES, Field.Index.ANALYZED));
+    doc.add(new Field("body", "body",
+                      Field.Store.YES, Field.Index.NOT_ANALYZED));
+    writer.addDocument(doc);
+    writer.close();
+
+    IndexReader reader = IndexReader.open(farsiIndex, true);
+    IndexSearcher search = newSearcher(reader);
+        
+    // Unicode order would include U+0633 in [ U+062F - U+0698 ], but Farsi
+    // orders the U+0698 character before the U+0633 character, so the single
+    // index Term below should NOT be returned by a TermRangeQuery
+    // with a Farsi Collator (or an Arabic one for the case when Farsi is 
+    // not supported).
+    Query csrq 
+      = new TermRangeQuery("content", firstBeg, firstEnd, true, true);
+    ScoreDoc[] result = search.search(csrq, null, 1000).scoreDocs;
+    assertEquals("The index Term should not be included.", 0, result.length);
+
+    csrq = new TermRangeQuery
+      ("content", secondBeg, secondEnd, true, true);
+    result = search.search(csrq, null, 1000).scoreDocs;
+    assertEquals("The index Term should be included.", 1, result.length);
+    search.close();
+  }
+  
+  // Test using various international locales with accented characters (which
+  // sort differently depending on locale)
+  //
+  // Copied (and slightly modified) from 
+  // org.apache.lucene.search.TestSort.testInternationalSort()
+  //  
+  // TODO: this test is really fragile. there are already 3 different cases,
+  // depending upon unicode version.
+  public void testCollationKeySort(Analyzer usAnalyzer,
+                                   Analyzer franceAnalyzer,
+                                   Analyzer swedenAnalyzer,
+                                   Analyzer denmarkAnalyzer,
+                                   String usResult,
+                                   String frResult,
+                                   String svResult,
+                                   String dkResult) throws Exception {
+    RAMDirectory indexStore = new RAMDirectory();
+    PerFieldAnalyzerWrapper analyzer
+      = new PerFieldAnalyzerWrapper(new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
+    analyzer.addAnalyzer("US", usAnalyzer);
+    analyzer.addAnalyzer("France", franceAnalyzer);
+    analyzer.addAnalyzer("Sweden", swedenAnalyzer);
+    analyzer.addAnalyzer("Denmark", denmarkAnalyzer);
+    IndexWriter writer = new IndexWriter(indexStore, new IndexWriterConfig(
+        TEST_VERSION_CURRENT, analyzer));
+
+    // document data:
+    // the tracer field is used to determine which document was hit
+    String[][] sortData = new String[][] {
+      // tracer contents US                 France             Sweden (sv_SE)     Denmark (da_DK)
+      {  "A",   "x",     "p\u00EAche",      "p\u00EAche",      "p\u00EAche",      "p\u00EAche"      },
+      {  "B",   "y",     "HAT",             "HAT",             "HAT",             "HAT"             },
+      {  "C",   "x",     "p\u00E9ch\u00E9", "p\u00E9ch\u00E9", "p\u00E9ch\u00E9", "p\u00E9ch\u00E9" },
+      {  "D",   "y",     "HUT",             "HUT",             "HUT",             "HUT"             },
+      {  "E",   "x",     "peach",           "peach",           "peach",           "peach"           },
+      {  "F",   "y",     "H\u00C5T",        "H\u00C5T",        "H\u00C5T",        "H\u00C5T"        },
+      {  "G",   "x",     "sin",             "sin",             "sin",             "sin"             },
+      {  "H",   "y",     "H\u00D8T",        "H\u00D8T",        "H\u00D8T",        "H\u00D8T"        },
+      {  "I",   "x",     "s\u00EDn",        "s\u00EDn",        "s\u00EDn",        "s\u00EDn"        },
+      {  "J",   "y",     "HOT",             "HOT",             "HOT",             "HOT"             },
+    };
+
+    for (int i = 0 ; i < sortData.length ; ++i) {
+      Document doc = new Document();
+      doc.add(new Field("tracer", sortData[i][0], 
+                        Field.Store.YES, Field.Index.NO));
+      doc.add(new Field("contents", sortData[i][1], 
+                        Field.Store.NO, Field.Index.ANALYZED));
+      if (sortData[i][2] != null) 
+        doc.add(new Field("US", sortData[i][2], 
+                          Field.Store.NO, Field.Index.ANALYZED));
+      if (sortData[i][3] != null) 
+        doc.add(new Field("France", sortData[i][3], 
+                          Field.Store.NO, Field.Index.ANALYZED));
+      if (sortData[i][4] != null)
+        doc.add(new Field("Sweden", sortData[i][4], 
+                          Field.Store.NO, Field.Index.ANALYZED));
+      if (sortData[i][5] != null) 
+        doc.add(new Field("Denmark", sortData[i][5], 
+                          Field.Store.NO, Field.Index.ANALYZED));
+      writer.addDocument(doc);
+    }
+    writer.optimize();
+    writer.close();
+    Searcher searcher = new IndexSearcher(indexStore, true);
+
+    Sort sort = new Sort();
+    Query queryX = new TermQuery(new Term ("contents", "x"));
+    Query queryY = new TermQuery(new Term ("contents", "y"));
+    
+    sort.setSort(new SortField("US", SortField.STRING));
+    assertMatches(searcher, queryY, sort, usResult);
+
+    sort.setSort(new SortField("France", SortField.STRING));
+    assertMatches(searcher, queryX, sort, frResult);
+
+    sort.setSort(new SortField("Sweden", SortField.STRING));
+    assertMatches(searcher, queryY, sort, svResult);
+
+    sort.setSort(new SortField("Denmark", SortField.STRING));
+    assertMatches(searcher, queryY, sort, dkResult);
+  }
+    
+  // Make sure the documents returned by the search match the expected list
+  // Copied from TestSort.java
+  private void assertMatches(Searcher searcher, Query query, Sort sort, 
+                             String expectedResult) throws IOException {
+    ScoreDoc[] result = searcher.search(query, null, 1000, sort).scoreDocs;
+    StringBuilder buff = new StringBuilder(10);
+    int n = result.length;
+    for (int i = 0 ; i < n ; ++i) {
+      Document doc = searcher.doc(result[i].doc);
+      String[] v = doc.getValues("tracer");
+      for (int j = 0 ; j < v.length ; ++j) {
+        buff.append(v[j]);
+      }
+    }
+    assertEquals(expectedResult, buff.toString());
+  }
+
+  public void assertThreadSafe(final Analyzer analyzer) throws Exception {
+    int numTestPoints = 100;
+    int numThreads = _TestUtil.nextInt(random, 3, 5);
+    final HashMap<String,String> map = new HashMap<String,String>();
+    
+    // create a map<String,SortKey> up front.
+    // then with multiple threads, generate sort keys for all the keys in the map
+    // and ensure they are the same as the ones we produced in serial fashion.
+
+    for (int i = 0; i < numTestPoints; i++) {
+      String term = _TestUtil.randomSimpleString(random);
+      TokenStream ts = analyzer.reusableTokenStream("fake", new StringReader(term));
+      CharTermAttribute encodedBytes = ts.addAttribute(CharTermAttribute.class);
+      ts.reset();
+      assertTrue(ts.incrementToken());
+      // ensure we make a copy of the actual bytes too
+      map.put(term, encodedBytes.toString());
+    }
+    
+    Thread threads[] = new Thread[numThreads];
+    for (int i = 0; i < numThreads; i++) {
+      threads[i] = new Thread() {
+        @Override
+        public void run() {
+          try {
+            for (Map.Entry<String,String> mapping : map.entrySet()) {
+              String term = mapping.getKey();
+              String expected = mapping.getValue();
+              TokenStream ts = analyzer.reusableTokenStream("fake", new StringReader(term));
+              CharTermAttribute encodedBytes = ts.addAttribute(CharTermAttribute.class);
+              ts.reset();
+              assertTrue(ts.incrementToken());
+              assertEquals(expected, encodedBytes.toString());
+            }
+          } catch (IOException e) {
+            throw new RuntimeException(e);
+          }
+        }
+      };
+    }
+    for (int i = 0; i < numThreads; i++) {
+      threads[i].start();
+    }
+    for (int i = 0; i < numThreads; i++) {
+      threads[i].join();
+    }
+  }
+}
diff --git a/lucene/backwards/src/test-framework/org/apache/lucene/analysis/MockAnalyzer.java b/lucene/backwards/src/test-framework/org/apache/lucene/analysis/MockAnalyzer.java
new file mode 100644
index 0000000..857c095
--- /dev/null
+++ b/lucene/backwards/src/test-framework/org/apache/lucene/analysis/MockAnalyzer.java
@@ -0,0 +1,170 @@
+package org.apache.lucene.analysis;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.io.Reader;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Random;
+
+import org.apache.lucene.util.LuceneTestCase;
+
+/**
+ * Analyzer for testing
+ * <p>
+ * This analyzer is a replacement for Whitespace/Simple/KeywordAnalyzers
+ * for unit tests. If you are testing a custom component such as a queryparser
+ * or analyzer-wrapper that consumes analysis streams, it's a great idea to test
+ * it with this analyzer instead. MockAnalyzer has the following behavior:
+ * <ul>
+ *   <li>By default, the assertions in {@link MockTokenizer} are turned on for extra
+ *       checks that the consumer is consuming properly. These checks can be disabled
+ *       with {@link #setEnableChecks(boolean)}.
+ *   <li>Payload data is randomly injected into the stream for more thorough testing
+ *       of payloads.
+ * </ul>
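+ * <p>
+ * A minimal usage sketch (the field name and input text below are made up for
+ * illustration):
+ * <pre>
+ *   Analyzer analyzer = new MockAnalyzer(random);   // whitespace tokenization + lowercasing
+ *   TokenStream ts = analyzer.reusableTokenStream("field", new StringReader("Some Text"));
+ *   BaseTokenStreamTestCase.assertTokenStreamContents(ts, new String[] { "some", "text" });
+ * </pre>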
+ * @see MockTokenizer
+ */
+public final class MockAnalyzer extends Analyzer { 
+  private final int pattern;
+  private final boolean lowerCase;
+  private final CharArraySet filter;
+  private final boolean enablePositionIncrements;
+  private int positionIncrementGap;
+  private final Random random;
+  private Map<String,Integer> previousMappings = new HashMap<String,Integer>();
+  private boolean enableChecks = true;
+
+  /**
+   * Creates a new MockAnalyzer.
+   * 
+   * @param random Random for payloads behavior
+   * @param pattern pattern constant describing how tokenization should happen
+   * @param lowerCase true if the tokenizer should lowercase terms
+   * @param filter CharArraySet describing how terms should be filtered (set of stopwords, etc)
+   * @param enablePositionIncrements true if position increments should reflect filtered terms.
+   */
+  public MockAnalyzer(Random random, int pattern, boolean lowerCase, CharArraySet filter, boolean enablePositionIncrements) {
+    this.random = random;
+    this.pattern = pattern;
+    this.lowerCase = lowerCase;
+    this.filter = filter;
+    this.enablePositionIncrements = enablePositionIncrements;
+  }
+
+  /**
+   * Calls {@link #MockAnalyzer(Random, int, boolean, CharArraySet, boolean) 
+   * MockAnalyzer(random, pattern, lowerCase, CharArraySet.EMPTY_SET, false)}.
+   */
+  public MockAnalyzer(Random random, int pattern, boolean lowerCase) {
+    this(random, pattern, lowerCase, CharArraySet.EMPTY_SET, false);
+  }
+
+  /** 
+   * Creates a whitespace-tokenizing, lowercasing analyzer with no stopword removal.
+   * <p>
+   * Calls {@link #MockAnalyzer(Random, int, boolean) 
+   * MockAnalyzer(random, MockTokenizer.WHITESPACE, true)}.
+   */
+  public MockAnalyzer(Random random) {
+    this(random, MockTokenizer.WHITESPACE, true);
+  }
+
+  @Override
+  public TokenStream tokenStream(String fieldName, Reader reader) {
+    MockTokenizer tokenizer = new MockTokenizer(reader, pattern, lowerCase);
+    tokenizer.setEnableChecks(enableChecks);
+    StopFilter filt = new StopFilter(LuceneTestCase.TEST_VERSION_CURRENT, tokenizer, filter);
+    filt.setEnablePositionIncrements(enablePositionIncrements);
+    return maybePayload(filt, fieldName);
+  }
+
+  private class SavedStreams {
+    MockTokenizer tokenizer;
+    TokenFilter filter;
+  }
+
+  @Override
+  public TokenStream reusableTokenStream(String fieldName, Reader reader)
+      throws IOException {
+    @SuppressWarnings("unchecked") Map<String,SavedStreams> map = (Map) getPreviousTokenStream();
+    if (map == null) {
+      map = new HashMap<String,SavedStreams>();
+      setPreviousTokenStream(map);
+    }
+    
+    SavedStreams saved = map.get(fieldName);
+    if (saved == null) {
+      saved = new SavedStreams();
+      saved.tokenizer = new MockTokenizer(reader, pattern, lowerCase);
+      saved.tokenizer.setEnableChecks(enableChecks);
+      StopFilter filt = new StopFilter(LuceneTestCase.TEST_VERSION_CURRENT, saved.tokenizer, filter);
+      filt.setEnablePositionIncrements(enablePositionIncrements);
+      saved.filter = filt;
+      saved.filter = maybePayload(saved.filter, fieldName);
+      map.put(fieldName, saved);
+      return saved.filter;
+    } else {
+      saved.tokenizer.reset(reader);
+      return saved.filter;
+    }
+  }
+  
+  private synchronized TokenFilter maybePayload(TokenFilter stream, String fieldName) {
+    Integer val = previousMappings.get(fieldName);
+    if (val == null) {
+      val = -1; // no payloads
+      if (LuceneTestCase.rarely(random)) {
+        switch(random.nextInt(3)) {
+          case 0: val = -1; // no payloads
+                  break;
+          case 1: val = Integer.MAX_VALUE; // variable length payload
+                  break;
+          case 2: val = random.nextInt(12); // fixed length payload
+                  break;
+        }
+      }
+      previousMappings.put(fieldName, val); // save it so we are consistent for this field
+    }
+    
+    if (val == -1)
+      return stream;
+    else if (val == Integer.MAX_VALUE)
+      return new MockVariableLengthPayloadFilter(random, stream);
+    else
+      return new MockFixedLengthPayloadFilter(random, stream, val);
+  }
+  
+  public void setPositionIncrementGap(int positionIncrementGap){
+    this.positionIncrementGap = positionIncrementGap;
+  }
+  
+  @Override
+  public int getPositionIncrementGap(String fieldName){
+    return positionIncrementGap;
+  }
+  
+  /** 
+   * Toggle consumer workflow checking: if your test consumes tokenstreams normally you
+   * should leave this enabled.
+   */
+  public void setEnableChecks(boolean enableChecks) {
+    this.enableChecks = enableChecks;
+  }
+}
diff --git a/lucene/backwards/src/test-framework/org/apache/lucene/analysis/MockFixedLengthPayloadFilter.java b/lucene/backwards/src/test-framework/org/apache/lucene/analysis/MockFixedLengthPayloadFilter.java
new file mode 100644
index 0000000..af0c364
--- /dev/null
+++ b/lucene/backwards/src/test-framework/org/apache/lucene/analysis/MockFixedLengthPayloadFilter.java
@@ -0,0 +1,49 @@
+package org.apache.lucene.analysis;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.Random;
+
+import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
+import org.apache.lucene.index.Payload;
+
+public final class MockFixedLengthPayloadFilter extends TokenFilter {
+  private final PayloadAttribute payloadAtt = addAttribute(PayloadAttribute.class);
+  private final Random random;
+  private final byte[] bytes;
+  private final Payload payload;
+
+  public MockFixedLengthPayloadFilter(Random random, TokenStream in, int length) {
+    super(in);
+    this.random = random;
+    this.bytes = new byte[length];
+    this.payload = new Payload(bytes);
+  }
+
+  @Override
+  public boolean incrementToken() throws IOException {
+    if (input.incrementToken()) {
+      random.nextBytes(bytes);
+      payloadAtt.setPayload(payload);
+      return true;
+    } else {
+      return false;
+    }
+  }
+}
diff --git a/lucene/backwards/src/test-framework/org/apache/lucene/analysis/MockTokenizer.java b/lucene/backwards/src/test-framework/org/apache/lucene/analysis/MockTokenizer.java
new file mode 100644
index 0000000..fee3c5d
--- /dev/null
+++ b/lucene/backwards/src/test-framework/org/apache/lucene/analysis/MockTokenizer.java
@@ -0,0 +1,195 @@
+package org.apache.lucene.analysis;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.io.Reader;
+
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
+import org.apache.lucene.util.AttributeSource.AttributeFactory;
+
+/**
+ * Tokenizer for testing.
+ * <p>
+ * This tokenizer is a replacement for {@link #WHITESPACE}, {@link #SIMPLE}, and {@link #KEYWORD}
+ * tokenizers. If you are writing a component such as a TokenFilter, it's a great idea to test
+ * it by wrapping this tokenizer instead, for the extra checks. This tokenizer has the following behavior:
+ * <ul>
+ *   <li>An internal state-machine is used for checking consumer consistency. These checks can
+ *       be disabled with {@link #setEnableChecks(boolean)}.
+ *   <li>For convenience, optionally lowercases terms that it outputs.
+ * </ul>
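+ * <p>
+ * A minimal sketch of using it in a TokenFilter test (MyTokenFilter is a
+ * hypothetical filter under test, assumed here to pass terms through unchanged):
+ * <pre>
+ *   Tokenizer tokenizer = new MockTokenizer(new StringReader("Some Text"), MockTokenizer.WHITESPACE, true);
+ *   TokenStream ts = new MyTokenFilter(tokenizer);
+ *   BaseTokenStreamTestCase.assertTokenStreamContents(ts, new String[] { "some", "text" });
+ * </pre>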
+ */
+public class MockTokenizer extends Tokenizer {
+  /** Acts similarly to WhitespaceTokenizer. */
+  public static final int WHITESPACE = 0; 
+  /** Acts similarly to KeywordTokenizer.
+   * TODO: Keyword returns an "empty" token for an empty reader... 
+   */
+  public static final int KEYWORD = 1;
+  /** Acts like LetterTokenizer. */
+  public static final int SIMPLE = 2;
+
+  private final int pattern;
+  private final boolean lowerCase;
+  private final int maxTokenLength;
+  public static final int DEFAULT_MAX_TOKEN_LENGTH = Integer.MAX_VALUE;
+
+  private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
+  private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
+  int off = 0;
+
+  // TODO: "register" with LuceneTestCase to ensure all streams are closed() ?
+  // currently, we can only check that the lifecycle is correct if someone is reusing,
+  // but not for "one-offs".
+  private static enum State { 
+    SETREADER,       // consumer set a reader input either via ctor or via reset(Reader)
+    RESET,           // consumer has called reset()
+    INCREMENT,       // consumer is consuming, has called incrementToken() == true
+    INCREMENT_FALSE, // consumer has called incrementToken() which returned false
+    END,             // consumer has called end() to perform end of stream operations
+    CLOSE            // consumer has called close() to release any resources
+  };
+  
+  private State streamState = State.CLOSE;
+  private boolean enableChecks = true;
+  
+  public MockTokenizer(AttributeFactory factory, Reader input, int pattern, boolean lowerCase, int maxTokenLength) {
+    super(factory, input);
+    this.pattern = pattern;
+    this.lowerCase = lowerCase;
+    this.streamState = State.SETREADER;
+    this.maxTokenLength = maxTokenLength;
+  }
+
+  public MockTokenizer(Reader input, int pattern, boolean lowerCase, int maxTokenLength) {
+    this(AttributeFactory.DEFAULT_ATTRIBUTE_FACTORY, input, pattern, lowerCase, maxTokenLength);
+  }
+
+  public MockTokenizer(Reader input, int pattern, boolean lowerCase) {
+    this(input, pattern, lowerCase, DEFAULT_MAX_TOKEN_LENGTH);
+  }
+  
+  @Override
+  public final boolean incrementToken() throws IOException {
+    assert !enableChecks || (streamState == State.RESET || streamState == State.INCREMENT) 
+                            : "incrementToken() called while in wrong state: " + streamState;
+    clearAttributes();
+    for (;;) {
+      int startOffset = off;
+      int cp = readCodePoint();
+      if (cp < 0) {
+        break;
+      } else if (isTokenChar(cp)) {
+        int endOffset;
+        do {
+          char chars[] = Character.toChars(normalize(cp));
+          for (int i = 0; i < chars.length; i++)
+            termAtt.append(chars[i]);
+          endOffset = off;
+          if (termAtt.length() >= maxTokenLength) {
+            break;
+          }
+          cp = readCodePoint();
+        } while (cp >= 0 && isTokenChar(cp));
+        offsetAtt.setOffset(correctOffset(startOffset), correctOffset(endOffset));
+        streamState = State.INCREMENT;
+        return true;
+      }
+    }
+    streamState = State.INCREMENT_FALSE;
+    return false;
+  }
+
+  protected int readCodePoint() throws IOException {
+    int ch = input.read();
+    if (ch < 0) {
+      return ch;
+    } else {
+      assert ch != 0xffff; /* only on 3.x */
+      assert !Character.isLowSurrogate((char) ch);
+      off++;
+      if (Character.isHighSurrogate((char) ch)) {
+        int ch2 = input.read();
+        if (ch2 >= 0) {
+          off++;
+          assert Character.isLowSurrogate((char) ch2);
+          return Character.toCodePoint((char) ch, (char) ch2);
+        }
+      }
+      return ch;
+    }
+  }
+
+  protected boolean isTokenChar(int c) {
+    switch(pattern) {
+      case WHITESPACE: return !Character.isWhitespace(c);
+      case KEYWORD: return true;
+      case SIMPLE: return Character.isLetter(c);
+      default: throw new RuntimeException("invalid pattern constant:" + pattern);
+    }
+  }
+  
+  protected int normalize(int c) {
+    return lowerCase ? Character.toLowerCase(c) : c;
+  }
+
+  @Override
+  public void reset() throws IOException {
+    super.reset();
+    off = 0;
+    assert !enableChecks || streamState != State.RESET : "double reset()";
+    streamState = State.RESET;
+  }
+  
+  @Override
+  public void close() throws IOException {
+    super.close();
+    // in some exceptional cases (e.g. TestIndexWriterExceptions) a test can prematurely close()
+    // these tests should disable this check, by default we check the normal workflow.
+    // TODO: investigate the CachingTokenFilter "double-close"... for now we ignore this
+    assert !enableChecks || streamState == State.END || streamState == State.CLOSE : "close() called in wrong state: " + streamState;
+    streamState = State.CLOSE;
+  }
+
+  @Override
+  public void reset(Reader input) throws IOException {
+    super.reset(input);
+    assert !enableChecks || streamState == State.CLOSE : "setReader() called in wrong state: " + streamState;
+    streamState = State.SETREADER;
+  }
+
+  @Override
+  public void end() throws IOException {
+    int finalOffset = correctOffset(off);
+    offsetAtt.setOffset(finalOffset, finalOffset);
+    // some tokenizers, such as limiting tokenizers, call end() before incrementToken() returns false.
+    // these tests should disable this check (in general you should consume the entire stream)
+    assert !enableChecks || streamState == State.INCREMENT_FALSE : "end() called before incrementToken() returned false!";
+    streamState = State.END;
+  }
+
+  /** 
+   * Toggle consumer workflow checking: if your test consumes tokenstreams normally you
+   * should leave this enabled.
+   */
+  public void setEnableChecks(boolean enableChecks) {
+    this.enableChecks = enableChecks;
+  }
+}
diff --git a/lucene/backwards/src/test-framework/org/apache/lucene/analysis/MockVariableLengthPayloadFilter.java b/lucene/backwards/src/test-framework/org/apache/lucene/analysis/MockVariableLengthPayloadFilter.java
new file mode 100644
index 0000000..f7b5361
--- /dev/null
+++ b/lucene/backwards/src/test-framework/org/apache/lucene/analysis/MockVariableLengthPayloadFilter.java
@@ -0,0 +1,51 @@
+package org.apache.lucene.analysis;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.Random;
+
+import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
+import org.apache.lucene.index.Payload;
+
+public final class MockVariableLengthPayloadFilter extends TokenFilter {
+  private static final int MAXLENGTH = 129;
+
+  private final PayloadAttribute payloadAtt = addAttribute(PayloadAttribute.class);
+  private final Random random;
+  private final byte[] bytes = new byte[MAXLENGTH];
+  private final Payload payload;
+
+  public MockVariableLengthPayloadFilter(Random random, TokenStream in) {
+    super(in);
+    this.random = random;
+    this.payload = new Payload(bytes);
+  }
+
+  @Override
+  public boolean incrementToken() throws IOException {
+    if (input.incrementToken()) {
+      random.nextBytes(bytes);
+      payload.setData(bytes, 0, random.nextInt(MAXLENGTH));
+      payloadAtt.setPayload(payload);
+      return true;
+    } else {
+      return false;
+    }
+  }
+}
diff --git a/lucene/backwards/src/test-framework/org/apache/lucene/analysis/VocabularyAssert.java b/lucene/backwards/src/test-framework/org/apache/lucene/analysis/VocabularyAssert.java
new file mode 100644
index 0000000..820455a
--- /dev/null
+++ b/lucene/backwards/src/test-framework/org/apache/lucene/analysis/VocabularyAssert.java
@@ -0,0 +1,82 @@
+package org.apache.lucene.analysis;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.util.zip.ZipFile;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.junit.Assert;
+
+/** Utility class for doing vocabulary-based stemming tests */
+public class VocabularyAssert {
+  /** Run a vocabulary test against two data files. */
+  public static void assertVocabulary(Analyzer a, InputStream voc, InputStream out)
+  throws IOException {
+    BufferedReader vocReader = new BufferedReader(
+        new InputStreamReader(voc, "UTF-8"));
+    BufferedReader outputReader = new BufferedReader(
+        new InputStreamReader(out, "UTF-8"));
+    String inputWord = null;
+    while ((inputWord = vocReader.readLine()) != null) {
+      String expectedWord = outputReader.readLine();
+      Assert.assertNotNull(expectedWord);
+      BaseTokenStreamTestCase.checkOneTermReuse(a, inputWord, expectedWord);
+    }
+  }
+  
+  /** Run a vocabulary test against one file: tab separated. */
+  public static void assertVocabulary(Analyzer a, InputStream vocOut)
+  throws IOException {
+    BufferedReader vocReader = new BufferedReader(
+        new InputStreamReader(vocOut, "UTF-8"));
+    String inputLine = null;
+    while ((inputLine = vocReader.readLine()) != null) {
+      if (inputLine.startsWith("#") || inputLine.trim().length() == 0)
+        continue; /* comment or blank line */
+      String words[] = inputLine.split("\t");
+      BaseTokenStreamTestCase.checkOneTermReuse(a, words[0], words[1]);
+    }
+  }
+  
+  /** Run a vocabulary test against two data files inside a zip file */
+  public static void assertVocabulary(Analyzer a, File zipFile, String voc, String out)
+  throws IOException {
+    ZipFile zip = new ZipFile(zipFile);
+    InputStream v = zip.getInputStream(zip.getEntry(voc));
+    InputStream o = zip.getInputStream(zip.getEntry(out));
+    assertVocabulary(a, v, o);
+    v.close();
+    o.close();
+    zip.close();
+  }
+  
+  /** Run a vocabulary test against a tab-separated data file inside a zip file */
+  public static void assertVocabulary(Analyzer a, File zipFile, String vocOut)
+  throws IOException {
+    ZipFile zip = new ZipFile(zipFile);
+    InputStream vo = zip.getInputStream(zip.getEntry(vocOut));
+    assertVocabulary(a, vo);
+    vo.close();
+    zip.close();
+  }
+}
diff --git a/lucene/backwards/src/test-framework/org/apache/lucene/index/DocHelper.java b/lucene/backwards/src/test-framework/org/apache/lucene/index/DocHelper.java
new file mode 100644
index 0000000..ab6e78f
--- /dev/null
+++ b/lucene/backwards/src/test-framework/org/apache/lucene/index/DocHelper.java
@@ -0,0 +1,275 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.io.UnsupportedEncodingException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Random;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.MockTokenizer;
+import org.apache.lucene.analysis.WhitespaceAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.Fieldable;
+import org.apache.lucene.search.Similarity;
+import org.apache.lucene.document.Field.Index;
+import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.document.Field.TermVector;
+import org.apache.lucene.index.FieldInfo.IndexOptions;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+
+import static org.apache.lucene.util.LuceneTestCase.TEST_VERSION_CURRENT;
+
+class DocHelper {
+  public static final String FIELD_1_TEXT = "field one text";
+  public static final String TEXT_FIELD_1_KEY = "textField1";
+  public static Field textField1 = new Field(TEXT_FIELD_1_KEY, FIELD_1_TEXT,
+      Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO);
+  
+  public static final String FIELD_2_TEXT = "field field field two text";
+  //Fields will be lexicographically sorted.  So, the order is: field, text, two
+  public static final int [] FIELD_2_FREQS = {3, 1, 1}; 
+  public static final String TEXT_FIELD_2_KEY = "textField2";
+  public static Field textField2 = new Field(TEXT_FIELD_2_KEY, FIELD_2_TEXT, Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
+  
+  public static final String FIELD_3_TEXT = "aaaNoNorms aaaNoNorms bbbNoNorms";
+  public static final String TEXT_FIELD_3_KEY = "textField3";
+  public static Field textField3 = new Field(TEXT_FIELD_3_KEY, FIELD_3_TEXT, Field.Store.YES, Field.Index.ANALYZED);
+  static { textField3.setOmitNorms(true); }
+
+  public static final String KEYWORD_TEXT = "Keyword";
+  public static final String KEYWORD_FIELD_KEY = "keyField";
+  public static Field keyField = new Field(KEYWORD_FIELD_KEY, KEYWORD_TEXT,
+      Field.Store.YES, Field.Index.NOT_ANALYZED);
+
+  public static final String NO_NORMS_TEXT = "omitNormsText";
+  public static final String NO_NORMS_KEY = "omitNorms";
+  public static Field noNormsField = new Field(NO_NORMS_KEY, NO_NORMS_TEXT,
+      Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS);
+
+  public static final String NO_TF_TEXT = "analyzed with no tf and positions";
+  public static final String NO_TF_KEY = "omitTermFreqAndPositions";
+  public static Field noTFField = new Field(NO_TF_KEY, NO_TF_TEXT,
+      Field.Store.YES, Field.Index.ANALYZED);
+  static {
+    noTFField.setIndexOptions(IndexOptions.DOCS_ONLY);
+  }
+
+  public static final String UNINDEXED_FIELD_TEXT = "unindexed field text";
+  public static final String UNINDEXED_FIELD_KEY = "unIndField";
+  public static Field unIndField = new Field(UNINDEXED_FIELD_KEY, UNINDEXED_FIELD_TEXT,
+      Field.Store.YES, Field.Index.NO);
+
+
+  public static final String UNSTORED_1_FIELD_TEXT = "unstored field text";
+  public static final String UNSTORED_FIELD_1_KEY = "unStoredField1";
+  public static Field unStoredField1 = new Field(UNSTORED_FIELD_1_KEY, UNSTORED_1_FIELD_TEXT,
+      Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.NO);
+
+  public static final String UNSTORED_2_FIELD_TEXT = "unstored field text";
+  public static final String UNSTORED_FIELD_2_KEY = "unStoredField2";
+  public static Field unStoredField2 = new Field(UNSTORED_FIELD_2_KEY, UNSTORED_2_FIELD_TEXT,
+      Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.YES);
+
+  public static final String LAZY_FIELD_BINARY_KEY = "lazyFieldBinary";
+  public static byte [] LAZY_FIELD_BINARY_BYTES;
+  public static Field lazyFieldBinary;
+  
+  public static final String LAZY_FIELD_KEY = "lazyField";
+  public static final String LAZY_FIELD_TEXT = "These are some field bytes";
+  public static Field lazyField = new Field(LAZY_FIELD_KEY, LAZY_FIELD_TEXT, Field.Store.YES, Field.Index.ANALYZED);
+  
+  public static final String LARGE_LAZY_FIELD_KEY = "largeLazyField";
+  public static String LARGE_LAZY_FIELD_TEXT;
+  public static Field largeLazyField;
+  
+  //From Issue 509
+  public static final String FIELD_UTF1_TEXT = "field one \u4e00text";
+  public static final String TEXT_FIELD_UTF1_KEY = "textField1Utf8";
+  public static Field textUtfField1 = new Field(TEXT_FIELD_UTF1_KEY, FIELD_UTF1_TEXT,
+      Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO);
+
+  public static final String FIELD_UTF2_TEXT = "field field field \u4e00two text";
+  //Fields will be lexicographically sorted.  So, the order is: field, text, two
+  public static final int [] FIELD_UTF2_FREQS = {3, 1, 1};
+  public static final String TEXT_FIELD_UTF2_KEY = "textField2Utf8";
+  public static Field textUtfField2 = new Field(TEXT_FIELD_UTF2_KEY, FIELD_UTF2_TEXT, Field.Store.YES, 
+          Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
+
+  public static Map<String,Object> nameValues = null;
+
+  // ordered list of all the fields...
+  // could use LinkedHashMap for this purpose if Java1.4 is OK
+  public static Field[] fields = new Field[] {
+    textField1,
+    textField2,
+    textField3,
+    keyField,
+    noNormsField,
+    noTFField,
+    unIndField,
+    unStoredField1,
+    unStoredField2,
+    textUtfField1,
+    textUtfField2,
+    lazyField,
+    lazyFieldBinary,//placeholder for binary field, since this is null.  It must be second to last.
+    largeLazyField//placeholder for large field, since this is null.  It must always be last
+  };
+
+  public static Map<String,Fieldable> all     =new HashMap<String,Fieldable>();
+  public static Map<String,Fieldable> indexed =new HashMap<String,Fieldable>();
+  public static Map<String,Fieldable> stored  =new HashMap<String,Fieldable>();
+  public static Map<String,Fieldable> unstored=new HashMap<String,Fieldable>();
+  public static Map<String,Fieldable> unindexed=new HashMap<String,Fieldable>();
+  public static Map<String,Fieldable> termvector=new HashMap<String,Fieldable>();
+  public static Map<String,Fieldable> notermvector=new HashMap<String,Fieldable>();
+  public static Map<String,Fieldable> lazy= new HashMap<String,Fieldable>();
+  public static Map<String,Fieldable> noNorms=new HashMap<String,Fieldable>();
+  public static Map<String,Fieldable> noTf=new HashMap<String,Fieldable>();
+
+  static {
+    //Initialize the large Lazy Field
+    StringBuilder buffer = new StringBuilder();
+    for (int i = 0; i < 10000; i++)
+    {
+      buffer.append("Lazily loading lengths of language in lieu of laughing ");
+    }
+    
+    try {
+      LAZY_FIELD_BINARY_BYTES = "These are some binary field bytes".getBytes("UTF8");
+    } catch (UnsupportedEncodingException e) {
+      // cannot happen: the UTF-8 charset is required to be present in every JVM
+    }
+    lazyFieldBinary = new Field(LAZY_FIELD_BINARY_KEY, LAZY_FIELD_BINARY_BYTES);
+    fields[fields.length - 2] = lazyFieldBinary;
+    LARGE_LAZY_FIELD_TEXT = buffer.toString();
+    largeLazyField = new Field(LARGE_LAZY_FIELD_KEY, LARGE_LAZY_FIELD_TEXT, Field.Store.YES, Field.Index.ANALYZED);
+    fields[fields.length - 1] = largeLazyField;
+    for (int i=0; i<fields.length; i++) {
+      Fieldable f = fields[i];
+      add(all,f);
+      if (f.isIndexed()) add(indexed,f);
+      else add(unindexed,f);
+      if (f.isTermVectorStored()) add(termvector,f);
+      if (f.isIndexed() && !f.isTermVectorStored()) add(notermvector,f);
+      if (f.isStored()) add(stored,f);
+      else add(unstored,f);
+      if (f.getOmitNorms()) add(noNorms,f);
+      if (f.getIndexOptions() == IndexOptions.DOCS_ONLY) add(noTf,f);
+      if (f.isLazy()) add(lazy, f);
+    }
+  }
+
+
+  private static void add(Map<String,Fieldable> map, Fieldable field) {
+    map.put(field.name(), field);
+  }
+
+
+  static
+  {
+    nameValues = new HashMap<String,Object>();
+    nameValues.put(TEXT_FIELD_1_KEY, FIELD_1_TEXT);
+    nameValues.put(TEXT_FIELD_2_KEY, FIELD_2_TEXT);
+    nameValues.put(TEXT_FIELD_3_KEY, FIELD_3_TEXT);
+    nameValues.put(KEYWORD_FIELD_KEY, KEYWORD_TEXT);
+    nameValues.put(NO_NORMS_KEY, NO_NORMS_TEXT);
+    nameValues.put(NO_TF_KEY, NO_TF_TEXT);
+    nameValues.put(UNINDEXED_FIELD_KEY, UNINDEXED_FIELD_TEXT);
+    nameValues.put(UNSTORED_FIELD_1_KEY, UNSTORED_1_FIELD_TEXT);
+    nameValues.put(UNSTORED_FIELD_2_KEY, UNSTORED_2_FIELD_TEXT);
+    nameValues.put(LAZY_FIELD_KEY, LAZY_FIELD_TEXT);
+    nameValues.put(LAZY_FIELD_BINARY_KEY, LAZY_FIELD_BINARY_BYTES);
+    nameValues.put(LARGE_LAZY_FIELD_KEY, LARGE_LAZY_FIELD_TEXT);
+    nameValues.put(TEXT_FIELD_UTF1_KEY, FIELD_UTF1_TEXT);
+    nameValues.put(TEXT_FIELD_UTF2_KEY, FIELD_UTF2_TEXT);
+  }   
+  
+  /**
+   * Adds the fields above to a document 
+   * @param doc The document to write
+   */ 
+  public static void setupDoc(Document doc) {
+    for (int i=0; i<fields.length; i++) {
+      doc.add(fields[i]);
+    }
+  }                         
+
+  /**
+   * Writes the document to the directory using a segment
+   * named "test"; returns the SegmentInfo describing the new
+   * segment 
+   * @param dir
+   * @param doc
+   * @throws IOException
+   */ 
+  public static SegmentInfo writeDoc(Random random, Directory dir, Document doc) throws IOException
+  {
+    return writeDoc(random, dir, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false), null, doc);
+  }
+
+  /**
+   * Writes the document to the directory using the analyzer
+   * and the similarity score; returns the SegmentInfo
+   * describing the new segment
+   * @param dir
+   * @param analyzer
+   * @param similarity
+   * @param doc
+   * @throws IOException
+   */ 
+  public static SegmentInfo writeDoc(Random random, Directory dir, Analyzer analyzer, Similarity similarity, Document doc) throws IOException {
+    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig( /* LuceneTestCase.newIndexWriterConfig(random, */
+        TEST_VERSION_CURRENT, analyzer).setSimilarity(similarity));
+    //writer.setUseCompoundFile(false);
+    writer.addDocument(doc);
+    writer.commit();
+    SegmentInfo info = writer.newestSegment();
+    writer.close();
+    return info;
+  }
+
+  public static int numFields(Document doc) {
+    return doc.getFields().size();
+  }
+  
+  public static Document createDocument(int n, String indexName, int numFields) {
+    StringBuilder sb = new StringBuilder();
+    Document doc = new Document();
+    doc.add(new Field("id", Integer.toString(n), Store.YES, Index.NOT_ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
+    doc.add(new Field("indexname", indexName, Store.YES, Index.NOT_ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
+    sb.append("a");
+    sb.append(n);
+    doc.add(new Field("field1", sb.toString(), Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
+    sb.append(" b");
+    sb.append(n);
+    for (int i = 1; i < numFields; i++) {
+      doc.add(new Field("field" + (i + 1), sb.toString(), Store.YES,
+                        Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
+    }
+    return doc;
+  }
+}
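
For context, a minimal sketch (not part of this patch) of how a test might exercise DocHelper. The test class name is hypothetical, and it has to live in org.apache.lucene.index because DocHelper is package-private.

package org.apache.lucene.index;

import org.apache.lucene.document.Document;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.LuceneTestCase;

public class TestDocHelperSketch extends LuceneTestCase {
  public void testWriteDoc() throws Exception {
    Directory dir = newDirectory();
    Document doc = new Document();
    DocHelper.setupDoc(doc);                                  // adds every canned field defined above
    SegmentInfo info = DocHelper.writeDoc(random, dir, doc);  // writes a single-document segment
    assertNotNull(info);
    IndexReader r = IndexReader.open(dir, true);
    assertEquals(1, r.numDocs());
    r.close();
    dir.close();
  }
}
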
diff --git a/lucene/backwards/src/test-framework/org/apache/lucene/index/MockIndexInput.java b/lucene/backwards/src/test-framework/org/apache/lucene/index/MockIndexInput.java
new file mode 100644
index 0000000..1e2346c
--- /dev/null
+++ b/lucene/backwards/src/test-framework/org/apache/lucene/index/MockIndexInput.java
@@ -0,0 +1,64 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.store.BufferedIndexInput;
+
+public class MockIndexInput extends BufferedIndexInput {
+    private byte[] buffer;
+    private int pointer = 0;
+    private long length;
+
+    public MockIndexInput(byte[] bytes) {
+        buffer = bytes;
+        length = bytes.length;
+    }
+
+    @Override
+    protected void readInternal(byte[] dest, int destOffset, int len) {
+        int remainder = len;
+        int start = pointer;
+        while (remainder != 0) {
+//          int bufferNumber = start / buffer.length;
+          int bufferOffset = start % buffer.length;
+          int bytesInBuffer = buffer.length - bufferOffset;
+          int bytesToCopy = bytesInBuffer >= remainder ? remainder : bytesInBuffer;
+          System.arraycopy(buffer, bufferOffset, dest, destOffset, bytesToCopy);
+          destOffset += bytesToCopy;
+          start += bytesToCopy;
+          remainder -= bytesToCopy;
+        }
+        pointer += len;
+    }
+
+    @Override
+    public void close() {
+        // ignore
+    }
+
+    @Override
+    protected void seekInternal(long pos) {
+        pointer = (int) pos;
+    }
+
+    @Override
+    public long length() {
+      return length;
+    }
+
+}
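
A small sketch (hypothetical, not from the patch) of what MockIndexInput provides: BufferedIndexInput handles buffering and positioning, while readInternal/seekInternal above serve bytes straight from the in-memory array.

package org.apache.lucene.index;

import org.apache.lucene.util.LuceneTestCase;

public class TestMockIndexInputSketch extends LuceneTestCase {
  public void testReadBack() throws Exception {
    byte[] data = new byte[] { 1, 2, 3, 4, 5 };
    MockIndexInput in = new MockIndexInput(data);
    assertEquals(data.length, in.length());
    assertEquals(1, in.readByte());   // served from the byte[] via readInternal
    in.seek(3);                       // positions via seekInternal
    assertEquals(4, in.readByte());
    in.close();
  }
}
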
diff --git a/lucene/backwards/src/test-framework/org/apache/lucene/index/MockRandomMergePolicy.java b/lucene/backwards/src/test-framework/org/apache/lucene/index/MockRandomMergePolicy.java
new file mode 100644
index 0000000..1ff3543
--- /dev/null
+++ b/lucene/backwards/src/test-framework/org/apache/lucene/index/MockRandomMergePolicy.java
@@ -0,0 +1,111 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Random;
+import java.util.Map;
+
+import org.apache.lucene.util._TestUtil;
+
+public class MockRandomMergePolicy extends MergePolicy {
+  private final Random random;
+
+  public MockRandomMergePolicy(Random random) {
+    // fork a private random, since we are called
+    // unpredictably from threads:
+    this.random = new Random(random.nextLong());
+  }
+
+  @Override
+  public MergeSpecification findMerges(SegmentInfos segmentInfos) {
+    MergeSpecification mergeSpec = null;
+    //System.out.println("MRMP: findMerges sis=" + segmentInfos);
+
+    if (segmentInfos.size() > 1 && random.nextInt(5) == 3) {
+      
+      List<SegmentInfo> segments = new ArrayList<SegmentInfo>(segmentInfos.asList());
+      Collections.shuffle(segments, random);
+
+      // TODO: sometimes make more than 1 merge?
+      mergeSpec = new MergeSpecification();
+      final int segsToMerge = _TestUtil.nextInt(random, 1, segmentInfos.size());
+      mergeSpec.add(new OneMerge(segments.subList(0, segsToMerge)));
+    }
+
+    return mergeSpec;
+  }
+
+  @Override
+  public MergeSpecification findMergesForOptimize(
+       SegmentInfos segmentInfos, int maxSegmentCount, Map<SegmentInfo,Boolean> segmentsToOptimize)
+    throws CorruptIndexException, IOException {
+
+    final List<SegmentInfo> eligibleSegments = new ArrayList<SegmentInfo>();
+    for(SegmentInfo info : segmentInfos) {
+      if (segmentsToOptimize.containsKey(info)) {
+        eligibleSegments.add(info);
+      }
+    }
+
+    //System.out.println("MRMP: findMergesForOptimize sis=" + segmentInfos + " eligible=" + eligibleSegments);
+    MergeSpecification mergeSpec = null;
+    if (eligibleSegments.size() > 1 || (eligibleSegments.size() == 1 && eligibleSegments.get(0).hasDeletions())) {
+      mergeSpec = new MergeSpecification();
+      // Already shuffled having come out of a set but
+      // shuffle again for good measure:
+      Collections.shuffle(eligibleSegments, random);
+      int upto = 0;
+      while(upto < eligibleSegments.size()) {
+        int max = Math.min(10, eligibleSegments.size()-upto);
+        int inc = max <= 2 ? max : _TestUtil.nextInt(random, 2, max);
+        mergeSpec.add(new OneMerge(eligibleSegments.subList(upto, upto+inc)));
+        upto += inc;
+      }
+    }
+
+    if (mergeSpec != null) {
+      for(OneMerge merge : mergeSpec.merges) {
+        for(SegmentInfo info : merge.segments) {
+          assert segmentsToOptimize.containsKey(info);
+        }
+      }
+    }
+    return mergeSpec;
+  }
+
+  @Override
+  public MergeSpecification findMergesToExpungeDeletes(
+      SegmentInfos segmentInfos)
+    throws CorruptIndexException, IOException {
+    return findMerges(segmentInfos);
+  }
+
+  @Override
+  public void close() {
+  }
+
+  @Override
+  public boolean useCompoundFile(SegmentInfos infos, SegmentInfo mergedInfo) throws IOException {
+    // 80% of the time we create CFS:
+    return random.nextInt(5) != 1;
+  }
+}
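
A minimal sketch (not part of this patch) of plugging MockRandomMergePolicy into a writer so merge selection happens at unpredictable points; the class name and field values are hypothetical. In practice the test framework's newIndexWriterConfig typically picks a merge policy at random itself, so tests rarely need to set it explicitly.

package org.apache.lucene.index;

import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.LuceneTestCase;

public class TestMockRandomMergePolicySketch extends LuceneTestCase {
  public void testRandomizedMerging() throws Exception {
    Directory dir = newDirectory();
    IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
    // swap in the randomized merge policy so merges are triggered at random points
    conf.setMergePolicy(new MockRandomMergePolicy(random));
    IndexWriter w = new IndexWriter(dir, conf);
    for (int i = 0; i < 50; i++) {
      Document doc = new Document();
      doc.add(new Field("id", Integer.toString(i), Field.Store.YES, Field.Index.NOT_ANALYZED));
      w.addDocument(doc);
    }
    w.close();
    dir.close();
  }
}
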
diff --git a/lucene/backwards/src/test-framework/org/apache/lucene/index/RandomIndexWriter.java b/lucene/backwards/src/test-framework/org/apache/lucene/index/RandomIndexWriter.java
new file mode 100644
index 0000000..5029b9a
--- /dev/null
+++ b/lucene/backwards/src/test-framework/org/apache/lucene/index/RandomIndexWriter.java
@@ -0,0 +1,258 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Random;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.index.IndexWriter; // javadoc
+import org.apache.lucene.search.Query;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.Version;
+import org.apache.lucene.util._TestUtil;
+
+/** Silly class that randomizes the indexing experience.  EG
+ *  it may swap in a different merge policy/scheduler; may
+ *  commit periodically; may or may not optimize in the end,
+ *  may flush by doc count instead of RAM, etc. 
+ */
+
+public class RandomIndexWriter implements Closeable {
+
+  public IndexWriter w;
+  private final Random r;
+  int docCount;
+  int flushAt;
+  private double flushAtFactor = 1.0;
+  private boolean getReaderCalled;
+
+  // Randomly calls Thread.yield so we mix up thread scheduling
+  private static final class MockIndexWriter extends IndexWriter {
+
+    private final Random r;
+
+    public MockIndexWriter(Random r,Directory dir, IndexWriterConfig conf) throws IOException {
+      super(dir, conf);
+      // must make a private random since our methods are
+      // called from different threads; else test failures may
+      // not be reproducible from the original seed
+      this.r = new Random(r.nextInt());
+    }
+
+    @Override
+    boolean testPoint(String name) {
+      if (r.nextInt(4) == 2)
+        Thread.yield();
+      return true;
+    }
+  }
+
+  /** create a RandomIndexWriter with a random config: Uses TEST_VERSION_CURRENT and Whitespace+LowercasingAnalyzer */
+  public RandomIndexWriter(Random r, Directory dir) throws IOException {
+    this(r, dir, LuceneTestCase.newIndexWriterConfig(r, LuceneTestCase.TEST_VERSION_CURRENT, new MockAnalyzer(r)));
+  }
+  
+  /** create a RandomIndexWriter with a random config: Uses TEST_VERSION_CURRENT */
+  public RandomIndexWriter(Random r, Directory dir, Analyzer a) throws IOException {
+    this(r, dir, LuceneTestCase.newIndexWriterConfig(r, LuceneTestCase.TEST_VERSION_CURRENT, a));
+  }
+  
+  /** create a RandomIndexWriter with a random config */
+  public RandomIndexWriter(Random r, Directory dir, Version v, Analyzer a) throws IOException {
+    this(r, dir, LuceneTestCase.newIndexWriterConfig(r, v, a));
+  }
+  
+  /** create a RandomIndexWriter with the provided config */
+  public RandomIndexWriter(Random r, Directory dir, IndexWriterConfig c) throws IOException {
+    this.r = r;
+    w = new MockIndexWriter(r, dir, c);
+    flushAt = _TestUtil.nextInt(r, 10, 1000);
+    if (LuceneTestCase.VERBOSE) {
+      System.out.println("RIW config=" + w.getConfig());
+    }
+  } 
+
+  /**
+   * Adds a Document.
+   * @see IndexWriter#addDocument(Document)
+   */
+  public void addDocument(final Document doc) throws IOException {
+    if (r.nextInt(5) == 3) {
+      // TODO: maybe, we should simply buffer up added docs
+      // (but we need to clone them), and only when
+      // getReader, commit, etc. are called, we do an
+      // addDocuments?  Would be better testing.
+      w.addDocuments(Collections.singletonList(doc));
+    } else {
+      w.addDocument(doc);
+    }
+    maybeCommit();
+  }
+  
+  public void addDocuments(Collection<Document> docs) throws IOException {
+    w.addDocuments(docs);
+    maybeCommit();
+  }
+
+  public void updateDocuments(Term delTerm, Collection<Document> docs) throws IOException {
+    w.updateDocuments(delTerm, docs);
+    maybeCommit();
+  }
+
+  private void maybeCommit() throws IOException {
+    if (docCount++ == flushAt) {
+      if (LuceneTestCase.VERBOSE) {
+        System.out.println("RIW.add/updateDocument: now doing a commit at docCount=" + docCount);
+      }
+      w.commit();
+      flushAt += _TestUtil.nextInt(r, (int) (flushAtFactor * 10), (int) (flushAtFactor * 1000));
+      if (flushAtFactor < 2e6) {
+        // gradually but exponentially increase time b/w flushes
+        flushAtFactor *= 1.05;
+      }
+    }
+  }
+
+  /**
+   * Updates a document.
+   * @see IndexWriter#updateDocument(Term, Document)
+   */
+  public void updateDocument(Term t, final Document doc) throws IOException {
+    if (r.nextInt(5) == 3) {
+      w.updateDocuments(t, Collections.singletonList(doc));
+    } else {
+      w.updateDocument(t, doc);
+    }
+    maybeCommit();
+  }
+  
+  public void addIndexes(Directory... dirs) throws CorruptIndexException, IOException {
+    w.addIndexes(dirs);
+  }
+  
+  public void deleteDocuments(Term term) throws CorruptIndexException, IOException {
+    w.deleteDocuments(term);
+  }
+
+  public void deleteDocuments(Query q) throws CorruptIndexException, IOException {
+    w.deleteDocuments(q);
+  }
+  
+  public void commit() throws CorruptIndexException, IOException {
+    w.commit();
+  }
+  
+  public int numDocs() throws IOException {
+    return w.numDocs();
+  }
+
+  public int maxDoc() {
+    return w.maxDoc();
+  }
+
+  public void deleteAll() throws IOException {
+    w.deleteAll();
+  }
+
+  private boolean doRandomOptimize = true;
+  private boolean doRandomOptimizeAssert = true;
+
+  public void setDoRandomOptimize(boolean v) {
+    doRandomOptimize = v;
+  }
+
+  public void setDoRandomOptimizeAssert(boolean v) {
+    doRandomOptimizeAssert = v;
+  }
+
+  private void doRandomOptimize() throws IOException {
+    if (doRandomOptimize) {
+      final int segCount = w.getSegmentCount();
+      if (r.nextBoolean() || segCount == 0) {
+        // full optimize
+        w.optimize();
+      } else {
+        // partial optimize
+        final int limit = _TestUtil.nextInt(r, 1, segCount);
+        w.optimize(limit);
+        assert !doRandomOptimizeAssert || w.getSegmentCount() <= limit: "limit=" + limit + " actual=" + w.getSegmentCount();
+      }
+    }
+  }
+
+  public IndexReader getReader() throws IOException {
+    return getReader(true);
+  }
+
+  public IndexReader getReader(boolean applyDeletions) throws IOException {
+    getReaderCalled = true;
+    if (r.nextInt(4) == 2) {
+      doRandomOptimize();
+    }
+    if (r.nextBoolean()) {
+      if (LuceneTestCase.VERBOSE) {
+        System.out.println("RIW.getReader: use NRT reader");
+      }
+      if (r.nextInt(5) == 1) {
+        w.commit();
+      }
+      return w.getReader(applyDeletions);
+    } else {
+      if (LuceneTestCase.VERBOSE) {
+        System.out.println("RIW.getReader: open new reader");
+      }
+      w.commit();
+      if (r.nextBoolean()) {
+        return IndexReader.open(w.getDirectory(), new KeepOnlyLastCommitDeletionPolicy(), r.nextBoolean(), _TestUtil.nextInt(r, 1, 10));
+      } else {
+        return w.getReader(applyDeletions);
+      }
+    }
+  }
+
+  /**
+   * Close this writer.
+   * @see IndexWriter#close()
+   */
+  public void close() throws IOException {
+    // if someone isn't using getReader() API, we want to be sure to
+    // maybeOptimize since presumably they might open a reader on the dir.
+    if (getReaderCalled == false && r.nextInt(8) == 2) {
+      doRandomOptimize();
+    }
+    w.close();
+  }
+
+  /**
+   * Forces an optimize.
+   * <p>
+   * NOTE: this should be avoided in tests unless absolutely necessary,
+   * as it will result in less test coverage.
+   * @see IndexWriter#optimize()
+   */
+  public void optimize() throws IOException {
+    w.optimize();
+  }
+}
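
The canonical usage pattern for RandomIndexWriter in a test, shown as a minimal sketch (the test class and field names here are hypothetical, not from this patch): construct it over a Directory, add documents, obtain a reader, and close everything.

package org.apache.lucene.index;

import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.LuceneTestCase;

public class TestRandomIndexWriterSketch extends LuceneTestCase {
  public void testIndexAndRead() throws Exception {
    Directory dir = newDirectory();
    // random IndexWriterConfig: merge policy, flush thresholds, commit points, etc. vary per seed
    RandomIndexWriter w = new RandomIndexWriter(random, dir);
    Document doc = new Document();
    doc.add(newField("body", "just a test", Field.Store.NO, Field.Index.ANALYZED));
    w.addDocument(doc);
    IndexReader r = w.getReader();  // may be an NRT reader or a freshly opened one
    assertEquals(1, r.numDocs());
    r.close();
    w.close();
    dir.close();
  }
}
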
diff --git a/lucene/backwards/src/test-framework/org/apache/lucene/index/SlowMultiReaderWrapper.java b/lucene/backwards/src/test-framework/org/apache/lucene/index/SlowMultiReaderWrapper.java
new file mode 100644
index 0000000..be93a34
--- /dev/null
+++ b/lucene/backwards/src/test-framework/org/apache/lucene/index/SlowMultiReaderWrapper.java
@@ -0,0 +1,49 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.ArrayList;
+
+import org.apache.lucene.util.ReaderUtil;
+
+/**
+ * Acts like Lucene 4.x's SlowMultiReaderWrapper, for testing
+ * top-level MultiTermEnum, MultiTermDocs, ...
+ */
+public class SlowMultiReaderWrapper extends MultiReader {
+
+  public SlowMultiReaderWrapper(IndexReader reader) {
+    super(subReaders(reader));
+  }
+  
+  private static IndexReader[] subReaders(IndexReader reader) {
+    ArrayList<IndexReader> list = new ArrayList<IndexReader>();
+    ReaderUtil.gatherSubReaders(list, reader);
+    return list.toArray(new IndexReader[list.size()]);
+  }
+
+  @Override
+  public IndexReader[] getSequentialSubReaders() {
+    return null;
+  }
+
+  @Override
+  public String toString() {
+    return "SlowMultiReaderWrapper(" + super.toString() + ")";
+  }
+}
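
A hypothetical sketch (not part of this patch) of wrapping a reader in SlowMultiReaderWrapper so term and posting enumeration goes through the multi-reader code paths even for a single-segment index.

package org.apache.lucene.index;

import org.apache.lucene.document.Field;
import org.apache.lucene.document.Document;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.LuceneTestCase;

public class TestSlowMultiReaderWrapperSketch extends LuceneTestCase {
  public void testTopLevelTermDocs() throws Exception {
    Directory dir = newDirectory();
    RandomIndexWriter w = new RandomIndexWriter(random, dir);
    Document doc = new Document();
    doc.add(newField("f", "hello world", Field.Store.NO, Field.Index.ANALYZED));
    w.addDocument(doc);
    IndexReader reader = w.getReader();
    w.close();

    // force MultiTermEnum/MultiTermDocs to be used for enumeration
    IndexReader slow = new SlowMultiReaderWrapper(reader);
    TermDocs td = slow.termDocs(new Term("f", "hello"));
    assertTrue(td.next());
    td.close();
    slow.close();   // MultiReader built over the sub-readers closes them as well
    dir.close();
  }
}
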
diff --git a/lucene/backwards/src/test-framework/org/apache/lucene/queryParser/QueryParserTestBase.java b/lucene/backwards/src/test-framework/org/apache/lucene/queryParser/QueryParserTestBase.java
new file mode 100644
index 0000000..9bf46ba
--- /dev/null
+++ b/lucene/backwards/src/test-framework/org/apache/lucene/queryParser/QueryParserTestBase.java
@@ -0,0 +1,1128 @@
+package org.apache.lucene.queryParser;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.io.Reader;
+import java.text.Collator;
+import java.text.DateFormat;
+import java.util.Calendar;
+import java.util.Date;
+import java.util.GregorianCalendar;
+import java.util.HashSet;
+import java.util.Locale;
+import java.util.Set;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.KeywordAnalyzer;
+import org.apache.lucene.analysis.LowerCaseTokenizer;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.MockTokenizer;
+import org.apache.lucene.analysis.StopAnalyzer;
+import org.apache.lucene.analysis.StopFilter;
+import org.apache.lucene.analysis.TokenFilter;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.WhitespaceAnalyzer;
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.document.DateField;
+import org.apache.lucene.document.DateTools;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.BooleanClause;
+import org.apache.lucene.search.MultiTermQuery;
+import org.apache.lucene.search.FuzzyQuery;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.search.PhraseQuery;
+import org.apache.lucene.search.PrefixQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.TermRangeQuery;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.WildcardQuery;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+
+/**
+ * Base Test class for QueryParser subclasses
+ */
+// TODO: it would be better to refactor the parts that are really specific
+// to the core QP and subclass/use the parts that are not in the contrib QP
+public abstract class QueryParserTestBase extends LuceneTestCase {
+  
+  public static Analyzer qpAnalyzer = new QPTestAnalyzer();
+
+  public static final class QPTestFilter extends TokenFilter {
+    CharTermAttribute termAtt;
+    OffsetAttribute offsetAtt;
+        
+    /**
+     * Filter which discards the token 'stop' and which expands the
+     * token 'phrase' into 'phrase1 phrase2'
+     */
+    public QPTestFilter(TokenStream in) {
+      super(in);
+      termAtt = addAttribute(CharTermAttribute.class);
+      offsetAtt = addAttribute(OffsetAttribute.class);
+    }
+
+    boolean inPhrase = false;
+    int savedStart = 0, savedEnd = 0;
+
+    @Override
+    public boolean incrementToken() throws IOException {
+      if (inPhrase) {
+        inPhrase = false;
+        clearAttributes();
+        termAtt.append("phrase2");
+        offsetAtt.setOffset(savedStart, savedEnd);
+        return true;
+      } else
+        while (input.incrementToken()) {
+          if (termAtt.toString().equals("phrase")) {
+            inPhrase = true;
+            savedStart = offsetAtt.startOffset();
+            savedEnd = offsetAtt.endOffset();
+            termAtt.setEmpty().append("phrase1");
+            offsetAtt.setOffset(savedStart, savedEnd);
+            return true;
+          } else if (!termAtt.toString().equals("stop"))
+            return true;
+        }
+      return false;
+    }
+  }
+
+  
+  public static final class QPTestAnalyzer extends Analyzer {
+
+    /** Filters LowerCaseTokenizer with QPTestFilter. */
+    @Override
+    public final TokenStream tokenStream(String fieldName, Reader reader) {
+      return new QPTestFilter(new LowerCaseTokenizer(TEST_VERSION_CURRENT, reader));
+    }
+  }
+
+  public static class QPTestParser extends QueryParser {
+    public QPTestParser(String f, Analyzer a) {
+      super(TEST_VERSION_CURRENT, f, a);
+    }
+
+    @Override
+    protected Query getFuzzyQuery(String field, String termStr, float minSimilarity) throws ParseException {
+      throw new ParseException("Fuzzy queries not allowed");
+    }
+
+    @Override
+    protected Query getWildcardQuery(String field, String termStr) throws ParseException {
+      throw new ParseException("Wildcard queries not allowed");
+    }
+  }
+
+  private int originalMaxClauses;
+
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    originalMaxClauses = BooleanQuery.getMaxClauseCount();
+  }
+
+  public abstract QueryParser getParser(Analyzer a) throws Exception;
+
+  public Query getQuery(String query, Analyzer a) throws Exception {
+    return getParser(a).parse(query);
+  }
+
+  public void assertQueryEquals(String query, Analyzer a, String result)
+    throws Exception {
+    Query q = getQuery(query, a);
+    String s = q.toString("field");
+    if (!s.equals(result)) {
+      fail("Query /" + query + "/ yielded /" + s
+           + "/, expecting /" + result + "/");
+    }
+  }
+
+  public void assertQueryEquals(QueryParser qp, String field, String query, String result) 
+    throws Exception {
+    Query q = qp.parse(query);
+    String s = q.toString(field);
+    if (!s.equals(result)) {
+      fail("Query /" + query + "/ yielded /" + s
+           + "/, expecting /" + result + "/");
+    }
+  }
+  
+  public void assertEscapedQueryEquals(String query, Analyzer a, String result)
+    throws Exception {
+    String escapedQuery = QueryParser.escape(query);
+    if (!escapedQuery.equals(result)) {
+      fail("Query /" + query + "/ yielded /" + escapedQuery
+          + "/, expecting /" + result + "/");
+    }
+  }
+
+  public void assertWildcardQueryEquals(String query, boolean lowercase, String result, boolean allowLeadingWildcard)
+    throws Exception {
+    QueryParser qp = getParser(null);
+    qp.setLowercaseExpandedTerms(lowercase);
+    qp.setAllowLeadingWildcard(allowLeadingWildcard);
+    Query q = qp.parse(query);
+    String s = q.toString("field");
+    if (!s.equals(result)) {
+      fail("WildcardQuery /" + query + "/ yielded /" + s
+           + "/, expecting /" + result + "/");
+    }
+  }
+
+  public void assertWildcardQueryEquals(String query, boolean lowercase, String result)
+    throws Exception {
+    assertWildcardQueryEquals(query, lowercase, result, false);
+  }
+
+  public void assertWildcardQueryEquals(String query, String result) throws Exception {
+    QueryParser qp = getParser(null);
+    Query q = qp.parse(query);
+    String s = q.toString("field");
+    if (!s.equals(result)) {
+      fail("WildcardQuery /" + query + "/ yielded /" + s + "/, expecting /"
+          + result + "/");
+    }
+  }
+
+  public Query getQueryDOA(String query, Analyzer a)
+    throws Exception {
+    if (a == null)
+      a = new MockAnalyzer(random, MockTokenizer.SIMPLE, true);
+    QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", a);
+    qp.setDefaultOperator(QueryParser.AND_OPERATOR);
+    return qp.parse(query);
+  }
+
+  public void assertQueryEqualsDOA(String query, Analyzer a, String result)
+    throws Exception {
+    Query q = getQueryDOA(query, a);
+    String s = q.toString("field");
+    if (!s.equals(result)) {
+      fail("Query /" + query + "/ yielded /" + s
+           + "/, expecting /" + result + "/");
+    }
+  }
+
+  public void testCJK() throws Exception {
+   // Test Ideographic Space - as wide as a CJK character cell (fullwidth).
+   // Google Translate was used to render the word "term" in Japanese -> 用語
+   assertQueryEquals("term\u3000term\u3000term", null, "term\u0020term\u0020term");
+   assertQueryEquals("用語\u3000用語\u3000用語", null, "用語\u0020用語\u0020用語");
+  }
+
+  public void testCJKTerm() throws Exception {
+    // individual CJK chars as terms
+    StandardAnalyzer analyzer = new StandardAnalyzer(TEST_VERSION_CURRENT); 
+    
+    BooleanQuery expected = new BooleanQuery();
+    expected.add(new TermQuery(new Term("field", "中")), BooleanClause.Occur.SHOULD);
+    expected.add(new TermQuery(new Term("field", "国")), BooleanClause.Occur.SHOULD);
+    
+    assertEquals(expected, getQuery("中国", analyzer));
+  }
+  
+  public void testCJKBoostedTerm() throws Exception {
+    // individual CJK chars as terms
+    StandardAnalyzer analyzer = new StandardAnalyzer(TEST_VERSION_CURRENT);
+    
+    BooleanQuery expected = new BooleanQuery();
+    expected.setBoost(0.5f);
+    expected.add(new TermQuery(new Term("field", "中")), BooleanClause.Occur.SHOULD);
+    expected.add(new TermQuery(new Term("field", "国")), BooleanClause.Occur.SHOULD);
+    
+    assertEquals(expected, getQuery("中国^0.5", analyzer));
+  }
+  
+  public void testCJKPhrase() throws Exception {
+    // individual CJK chars as terms
+    StandardAnalyzer analyzer = new StandardAnalyzer(TEST_VERSION_CURRENT);
+    
+    PhraseQuery expected = new PhraseQuery();
+    expected.add(new Term("field", "中"));
+    expected.add(new Term("field", "国"));
+    
+    assertEquals(expected, getQuery("\"中国\"", analyzer));
+  }
+  
+  public void testCJKBoostedPhrase() throws Exception {
+    // individual CJK chars as terms
+    StandardAnalyzer analyzer = new StandardAnalyzer(TEST_VERSION_CURRENT);
+    
+    PhraseQuery expected = new PhraseQuery();
+    expected.setBoost(0.5f);
+    expected.add(new Term("field", "中"));
+    expected.add(new Term("field", "国"));
+    
+    assertEquals(expected, getQuery("\"中国\"^0.5", analyzer));
+  }
+  
+  public void testCJKSloppyPhrase() throws Exception {
+    // individual CJK chars as terms
+    StandardAnalyzer analyzer = new StandardAnalyzer(TEST_VERSION_CURRENT);
+    
+    PhraseQuery expected = new PhraseQuery();
+    expected.setSlop(3);
+    expected.add(new Term("field", "中"));
+    expected.add(new Term("field", "国"));
+    
+    assertEquals(expected, getQuery("\"中国\"~3", analyzer));
+  }
+  
+  public void testAutoGeneratePhraseQueriesOn() throws Exception {
+    // individual CJK chars as terms
+    StandardAnalyzer analyzer = new StandardAnalyzer(TEST_VERSION_CURRENT);
+  
+    PhraseQuery expected = new PhraseQuery();
+    expected.add(new Term("field", "中"));
+    expected.add(new Term("field", "国"));
+    QueryParser parser = new QueryParser(TEST_VERSION_CURRENT, "field", analyzer);
+    parser.setAutoGeneratePhraseQueries(true);
+    assertEquals(expected, parser.parse("中国"));
+  }
+
+  public void testSimple() throws Exception {
+    assertQueryEquals("term term term", null, "term term term");
+    assertQueryEquals("türm term term", new MockAnalyzer(random), "türm term term");
+    assertQueryEquals("ümlaut", new MockAnalyzer(random), "ümlaut");
+
+    assertQueryEquals("\"\"", new KeywordAnalyzer(), "");
+    assertQueryEquals("foo:\"\"", new KeywordAnalyzer(), "foo:");
+
+    assertQueryEquals("a AND b", null, "+a +b");
+    assertQueryEquals("(a AND b)", null, "+a +b");
+    assertQueryEquals("c OR (a AND b)", null, "c (+a +b)");
+    assertQueryEquals("a AND NOT b", null, "+a -b");
+    assertQueryEquals("a AND -b", null, "+a -b");
+    assertQueryEquals("a AND !b", null, "+a -b");
+    assertQueryEquals("a && b", null, "+a +b");
+    assertQueryEquals("a && ! b", null, "+a -b");
+
+    assertQueryEquals("a OR b", null, "a b");
+    assertQueryEquals("a || b", null, "a b");
+    assertQueryEquals("a OR !b", null, "a -b");
+    assertQueryEquals("a OR ! b", null, "a -b");
+    assertQueryEquals("a OR -b", null, "a -b");
+
+    assertQueryEquals("+term -term term", null, "+term -term term");
+    assertQueryEquals("foo:term AND field:anotherTerm", null,
+                      "+foo:term +anotherterm");
+    assertQueryEquals("term AND \"phrase phrase\"", null,
+                      "+term +\"phrase phrase\"");
+    assertQueryEquals("\"hello there\"", null, "\"hello there\"");
+    assertTrue(getQuery("a AND b", null) instanceof BooleanQuery);
+    assertTrue(getQuery("hello", null) instanceof TermQuery);
+    assertTrue(getQuery("\"hello there\"", null) instanceof PhraseQuery);
+
+    assertQueryEquals("germ term^2.0", null, "germ term^2.0");
+    assertQueryEquals("(term)^2.0", null, "term^2.0");
+    assertQueryEquals("(germ term)^2.0", null, "(germ term)^2.0");
+    assertQueryEquals("term^2.0", null, "term^2.0");
+    assertQueryEquals("term^2", null, "term^2.0");
+    assertQueryEquals("\"germ term\"^2.0", null, "\"germ term\"^2.0");
+    assertQueryEquals("\"term germ\"^2", null, "\"term germ\"^2.0");
+
+    assertQueryEquals("(foo OR bar) AND (baz OR boo)", null,
+                      "+(foo bar) +(baz boo)");
+    assertQueryEquals("((a OR b) AND NOT c) OR d", null,
+                      "(+(a b) -c) d");
+    assertQueryEquals("+(apple \"steve jobs\") -(foo bar baz)", null,
+                      "+(apple \"steve jobs\") -(foo bar baz)");
+    assertQueryEquals("+title:(dog OR cat) -author:\"bob dole\"", null,
+                      "+(title:dog title:cat) -author:\"bob dole\"");
+    
+    QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer(random));
+    // make sure OR is the default:
+    assertEquals(QueryParser.OR_OPERATOR, qp.getDefaultOperator());
+    qp.setDefaultOperator(QueryParser.AND_OPERATOR);
+    assertEquals(QueryParser.AND_OPERATOR, qp.getDefaultOperator());
+    qp.setDefaultOperator(QueryParser.OR_OPERATOR);
+    assertEquals(QueryParser.OR_OPERATOR, qp.getDefaultOperator());
+  }
+
+  public void testPunct() throws Exception {
+    Analyzer a = new MockAnalyzer(random, MockTokenizer.WHITESPACE, false);
+    assertQueryEquals("a&b", a, "a&b");
+    assertQueryEquals("a&&b", a, "a&&b");
+    assertQueryEquals(".NET", a, ".NET");
+  }
+
+  public void testSlop() throws Exception {
+    assertQueryEquals("\"term germ\"~2", null, "\"term germ\"~2");
+    assertQueryEquals("\"term germ\"~2 flork", null, "\"term germ\"~2 flork");
+    assertQueryEquals("\"term\"~2", null, "term");
+    assertQueryEquals("\" \"~2 germ", null, "germ");
+    assertQueryEquals("\"term germ\"~2^2", null, "\"term germ\"~2^2.0");
+  }
+
+  public void testNumber() throws Exception {
+    // The numbers go away because SimpleAnalyzer ignores them
+    assertQueryEquals("3", null, "");
+    assertQueryEquals("term 1.0 1 2", null, "term");
+    assertQueryEquals("term term1 term2", null, "term term term");
+
+    Analyzer a = new MockAnalyzer(random, MockTokenizer.WHITESPACE, true);
+    assertQueryEquals("3", a, "3");
+    assertQueryEquals("term 1.0 1 2", a, "term 1.0 1 2");
+    assertQueryEquals("term term1 term2", a, "term term1 term2");
+  }
+
+  public void testWildcard() throws Exception {
+    assertQueryEquals("term*", null, "term*");
+    assertQueryEquals("term*^2", null, "term*^2.0");
+    assertQueryEquals("term~", null, "term~0.5");
+    assertQueryEquals("term~0.7", null, "term~0.7");
+    assertQueryEquals("term~^2", null, "term~0.5^2.0");
+    assertQueryEquals("term^2~", null, "term~0.5^2.0");
+    assertQueryEquals("term*germ", null, "term*germ");
+    assertQueryEquals("term*germ^3", null, "term*germ^3.0");
+
+    assertTrue(getQuery("term*", null) instanceof PrefixQuery);
+    assertTrue(getQuery("term*^2", null) instanceof PrefixQuery);
+    assertTrue(getQuery("term~", null) instanceof FuzzyQuery);
+    assertTrue(getQuery("term~0.7", null) instanceof FuzzyQuery);
+    FuzzyQuery fq = (FuzzyQuery)getQuery("term~0.7", null);
+    assertEquals(0.7f, fq.getMinSimilarity(), 0.1f);
+    assertEquals(FuzzyQuery.defaultPrefixLength, fq.getPrefixLength());
+    fq = (FuzzyQuery)getQuery("term~", null);
+    assertEquals(0.5f, fq.getMinSimilarity(), 0.1f);
+    assertEquals(FuzzyQuery.defaultPrefixLength, fq.getPrefixLength());
+    
+    assertParseException("term~1.1"); // value > 1, throws exception
+
+    assertTrue(getQuery("term*germ", null) instanceof WildcardQuery);
+
+    /* Tests to see that wild card terms are (or are not) properly
+     * lower-cased with proper parser configuration
+     */
+// First prefix queries:
+    // by default, convert to lowercase:
+    assertWildcardQueryEquals("Term*", true, "term*");
+    // explicitly set lowercase:
+    assertWildcardQueryEquals("term*", true, "term*");
+    assertWildcardQueryEquals("Term*", true, "term*");
+    assertWildcardQueryEquals("TERM*", true, "term*");
+    // explicitly disable lowercase conversion:
+    assertWildcardQueryEquals("term*", false, "term*");
+    assertWildcardQueryEquals("Term*", false, "Term*");
+    assertWildcardQueryEquals("TERM*", false, "TERM*");
+// Then 'full' wildcard queries:
+    // by default, convert to lowercase:
+    assertWildcardQueryEquals("Te?m", "te?m");
+    // explicitly set lowercase:
+    assertWildcardQueryEquals("te?m", true, "te?m");
+    assertWildcardQueryEquals("Te?m", true, "te?m");
+    assertWildcardQueryEquals("TE?M", true, "te?m");
+    assertWildcardQueryEquals("Te?m*gerM", true, "te?m*germ");
+    // explicitly disable lowercase conversion:
+    assertWildcardQueryEquals("te?m", false, "te?m");
+    assertWildcardQueryEquals("Te?m", false, "Te?m");
+    assertWildcardQueryEquals("TE?M", false, "TE?M");
+    assertWildcardQueryEquals("Te?m*gerM", false, "Te?m*gerM");
+//  Fuzzy queries:
+    assertWildcardQueryEquals("Term~", "term~0.5");
+    assertWildcardQueryEquals("Term~", true, "term~0.5");
+    assertWildcardQueryEquals("Term~", false, "Term~0.5");
+//  Range queries:
+    assertWildcardQueryEquals("[A TO C]", "[a TO c]");
+    assertWildcardQueryEquals("[A TO C]", true, "[a TO c]");
+    assertWildcardQueryEquals("[A TO C]", false, "[A TO C]");
+    // Test suffix queries: first disallow
+    try {
+      assertWildcardQueryEquals("*Term", true, "*term");
+      fail();
+    } catch(ParseException pe) {
+      // expected exception
+    }
+    try {
+      assertWildcardQueryEquals("?Term", true, "?term");
+      fail();
+    } catch(ParseException pe) {
+      // expected exception
+    }
+    // Test suffix queries: then allow
+    assertWildcardQueryEquals("*Term", true, "*term", true);
+    assertWildcardQueryEquals("?Term", true, "?term", true);
+  }
+  
+  public void testLeadingWildcardType() throws Exception {
+    QueryParser qp = getParser(null);
+    qp.setAllowLeadingWildcard(true);
+    assertEquals(WildcardQuery.class, qp.parse("t*erm*").getClass());
+    assertEquals(WildcardQuery.class, qp.parse("?term*").getClass());
+    assertEquals(WildcardQuery.class, qp.parse("*term*").getClass());
+  }
+
+  public void testQPA() throws Exception {
+    assertQueryEquals("term term^3.0 term", qpAnalyzer, "term term^3.0 term");
+    assertQueryEquals("term stop^3.0 term", qpAnalyzer, "term term");
+    
+    assertQueryEquals("term term term", qpAnalyzer, "term term term");
+    assertQueryEquals("term +stop term", qpAnalyzer, "term term");
+    assertQueryEquals("term -stop term", qpAnalyzer, "term term");
+
+    assertQueryEquals("drop AND (stop) AND roll", qpAnalyzer, "+drop +roll");
+    assertQueryEquals("term +(stop) term", qpAnalyzer, "term term");
+    assertQueryEquals("term -(stop) term", qpAnalyzer, "term term");
+    
+    assertQueryEquals("drop AND stop AND roll", qpAnalyzer, "+drop +roll");
+    assertQueryEquals("term phrase term", qpAnalyzer,
+                      "term (phrase1 phrase2) term");
+    assertQueryEquals("term AND NOT phrase term", qpAnalyzer,
+                      "+term -(phrase1 phrase2) term");
+    assertQueryEquals("stop^3", qpAnalyzer, "");
+    assertQueryEquals("stop", qpAnalyzer, "");
+    assertQueryEquals("(stop)^3", qpAnalyzer, "");
+    assertQueryEquals("((stop))^3", qpAnalyzer, "");
+    assertQueryEquals("(stop^3)", qpAnalyzer, "");
+    assertQueryEquals("((stop)^3)", qpAnalyzer, "");
+    assertQueryEquals("(stop)", qpAnalyzer, "");
+    assertQueryEquals("((stop))", qpAnalyzer, "");
+    assertTrue(getQuery("term term term", qpAnalyzer) instanceof BooleanQuery);
+    assertTrue(getQuery("term +stop", qpAnalyzer) instanceof TermQuery);
+  }
+
+  public void testRange() throws Exception {
+    assertQueryEquals("[ a TO z]", null, "[a TO z]");
+    assertEquals(MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT, ((TermRangeQuery)getQuery("[ a TO z]", null)).getRewriteMethod());
+
+    QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer(random, MockTokenizer.SIMPLE, true));
+    qp.setMultiTermRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE);
+    assertEquals(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE,((TermRangeQuery)qp.parse("[ a TO z]")).getRewriteMethod());
+    
+    assertQueryEquals("[ a TO z ]", null, "[a TO z]");
+    assertQueryEquals("{ a TO z}", null, "{a TO z}");
+    assertQueryEquals("{ a TO z }", null, "{a TO z}");
+    assertQueryEquals("{ a TO z }^2.0", null, "{a TO z}^2.0");
+    assertQueryEquals("[ a TO z] OR bar", null, "[a TO z] bar");
+    assertQueryEquals("[ a TO z] AND bar", null, "+[a TO z] +bar");
+    assertQueryEquals("( bar blar { a TO z}) ", null, "bar blar {a TO z}");
+    assertQueryEquals("gack ( bar blar { a TO z}) ", null, "gack (bar blar {a TO z})");
+  }
+    
+  public void testFarsiRangeCollating() throws Exception {
+    Directory ramDir = newDirectory();
+    IndexWriter iw = new IndexWriter(ramDir, newIndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)));
+    Document doc = new Document();
+    doc.add(newField("content","\u0633\u0627\u0628", 
+                      Field.Store.YES, Field.Index.NOT_ANALYZED));
+    iw.addDocument(doc);
+    iw.close();
+    IndexSearcher is = new IndexSearcher(ramDir, true);
+
+    QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "content", new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
+
+    // Neither Java 1.4.2 nor 1.5.0 has Farsi Locale collation available in
+    // RuleBasedCollator.  However, the Arabic Locale seems to order the Farsi
+    // characters properly.
+    Collator c = Collator.getInstance(new Locale("ar"));
+    qp.setRangeCollator(c);
+
+    // Unicode order would include U+0633 in [ U+062F - U+0698 ], but Farsi
+    // orders the U+0698 character before the U+0633 character, so the single
+    // index Term below should NOT be returned by a ConstantScoreRangeQuery
+    // with a Farsi Collator (or an Arabic one for the case when Farsi is not
+    // supported).
+      
+    // Test ConstantScoreRangeQuery
+    qp.setMultiTermRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
+    ScoreDoc[] result = is.search(qp.parse("[ \u062F TO \u0698 ]"), null, 1000).scoreDocs;
+    assertEquals("The index Term should not be included.", 0, result.length);
+
+    result = is.search(qp.parse("[ \u0633 TO \u0638 ]"), null, 1000).scoreDocs;
+    assertEquals("The index Term should be included.", 1, result.length);
+
+    // Test TermRangeQuery
+    qp.setMultiTermRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE);
+    result = is.search(qp.parse("[ \u062F TO \u0698 ]"), null, 1000).scoreDocs;
+    assertEquals("The index Term should not be included.", 0, result.length);
+
+    result = is.search(qp.parse("[ \u0633 TO \u0638 ]"), null, 1000).scoreDocs;
+    assertEquals("The index Term should be included.", 1, result.length);
+
+    is.close();
+    ramDir.close();
+  }
+  
+  private String escapeDateString(String s) {
+    if (s.indexOf(" ") > -1) {
+      return "\"" + s + "\"";
+    } else {
+      return s;
+    }
+  }
+  
+  /** for testing legacy DateField support */
+  private String getLegacyDate(String s) throws Exception {
+    DateFormat df = DateFormat.getDateInstance(DateFormat.SHORT);
+    return DateField.dateToString(df.parse(s));
+  }
+
+  /** for testing DateTools support */
+  private String getDate(String s, DateTools.Resolution resolution) throws Exception {
+    DateFormat df = DateFormat.getDateInstance(DateFormat.SHORT);
+    return getDate(df.parse(s), resolution);      
+  }
+  
+  /** for testing DateTools support */
+  private String getDate(Date d, DateTools.Resolution resolution) throws Exception {
+      if (resolution == null) {
+        return DateField.dateToString(d);      
+      } else {
+        return DateTools.dateToString(d, resolution);
+      }
+    }
+  
+  private String getLocalizedDate(int year, int month, int day) {
+    DateFormat df = DateFormat.getDateInstance(DateFormat.SHORT);
+    Calendar calendar = new GregorianCalendar();
+    calendar.clear();
+    calendar.set(year, month, day);
+    calendar.set(Calendar.HOUR_OF_DAY, 23);
+    calendar.set(Calendar.MINUTE, 59);
+    calendar.set(Calendar.SECOND, 59);
+    calendar.set(Calendar.MILLISECOND, 999);
+    return df.format(calendar.getTime());
+  }
+
+  /** for testing legacy DateField support */
+  public void testLegacyDateRange() throws Exception {
+    String startDate = getLocalizedDate(2002, 1, 1);
+    String endDate = getLocalizedDate(2002, 1, 4);
+    Calendar endDateExpected = new GregorianCalendar();
+    endDateExpected.clear();
+    endDateExpected.set(2002, 1, 4, 23, 59, 59);
+    endDateExpected.set(Calendar.MILLISECOND, 999);
+    assertQueryEquals("[ " + escapeDateString(startDate) + " TO " + escapeDateString(endDate) + "]", null,
+                      "[" + getLegacyDate(startDate) + " TO " + DateField.dateToString(endDateExpected.getTime()) + "]");
+    assertQueryEquals("{  " + escapeDateString(startDate) + "    " + escapeDateString(endDate) + "   }", null,
+                      "{" + getLegacyDate(startDate) + " TO " + getLegacyDate(endDate) + "}");
+  }
+  
+  public void testDateRange() throws Exception {
+    String startDate = getLocalizedDate(2002, 1, 1);
+    String endDate = getLocalizedDate(2002, 1, 4);
+    Calendar endDateExpected = new GregorianCalendar();
+    endDateExpected.clear();
+    endDateExpected.set(2002, 1, 4, 23, 59, 59);
+    endDateExpected.set(Calendar.MILLISECOND, 999);
+    final String defaultField = "default";
+    final String monthField = "month";
+    final String hourField = "hour";
+    QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer(random, MockTokenizer.SIMPLE, true));
+    
+    // Don't set any date resolution and verify if DateField is used
+    assertDateRangeQueryEquals(qp, defaultField, startDate, endDate, 
+                               endDateExpected.getTime(), null);
+    
+    // set a field specific date resolution
+    qp.setDateResolution(monthField, DateTools.Resolution.MONTH);
+    
+    // DateField should still be used for defaultField
+    assertDateRangeQueryEquals(qp, defaultField, startDate, endDate, 
+                               endDateExpected.getTime(), null);
+    
+    // set default date resolution to MILLISECOND 
+    qp.setDateResolution(DateTools.Resolution.MILLISECOND);
+    
+    // set second field specific date resolution    
+    qp.setDateResolution(hourField, DateTools.Resolution.HOUR);
+
+    // for this field no field specific date resolution has been set,
+    // so verify if the default resolution is used
+    assertDateRangeQueryEquals(qp, defaultField, startDate, endDate, 
+            endDateExpected.getTime(), DateTools.Resolution.MILLISECOND);
+
+    // verify if field specific date resolutions are used for these two fields
+    assertDateRangeQueryEquals(qp, monthField, startDate, endDate, 
+            endDateExpected.getTime(), DateTools.Resolution.MONTH);
+
+    assertDateRangeQueryEquals(qp, hourField, startDate, endDate, 
+            endDateExpected.getTime(), DateTools.Resolution.HOUR);  
+  }
+  
+  public void assertDateRangeQueryEquals(QueryParser qp, String field, String startDate, String endDate, 
+                                         Date endDateInclusive, DateTools.Resolution resolution) throws Exception {
+    assertQueryEquals(qp, field, field + ":[" + escapeDateString(startDate) + " TO " + escapeDateString(endDate) + "]",
+               "[" + getDate(startDate, resolution) + " TO " + getDate(endDateInclusive, resolution) + "]");
+    assertQueryEquals(qp, field, field + ":{" + escapeDateString(startDate) + " TO " + escapeDateString(endDate) + "}",
+               "{" + getDate(startDate, resolution) + " TO " + getDate(endDate, resolution) + "}");
+  }
+
+  public void testEscaped() throws Exception {
+    Analyzer a = new MockAnalyzer(random, MockTokenizer.WHITESPACE, false);
+    
+    /*assertQueryEquals("\\[brackets", a, "\\[brackets");
+    assertQueryEquals("\\[brackets", null, "brackets");
+    assertQueryEquals("\\\\", a, "\\\\");
+    assertQueryEquals("\\+blah", a, "\\+blah");
+    assertQueryEquals("\\(blah", a, "\\(blah");
+
+    assertQueryEquals("\\-blah", a, "\\-blah");
+    assertQueryEquals("\\!blah", a, "\\!blah");
+    assertQueryEquals("\\{blah", a, "\\{blah");
+    assertQueryEquals("\\}blah", a, "\\}blah");
+    assertQueryEquals("\\:blah", a, "\\:blah");
+    assertQueryEquals("\\^blah", a, "\\^blah");
+    assertQueryEquals("\\[blah", a, "\\[blah");
+    assertQueryEquals("\\]blah", a, "\\]blah");
+    assertQueryEquals("\\\"blah", a, "\\\"blah");
+    assertQueryEquals("\\(blah", a, "\\(blah");
+    assertQueryEquals("\\)blah", a, "\\)blah");
+    assertQueryEquals("\\~blah", a, "\\~blah");
+    assertQueryEquals("\\*blah", a, "\\*blah");
+    assertQueryEquals("\\?blah", a, "\\?blah");
+    //assertQueryEquals("foo \\&\\& bar", a, "foo \\&\\& bar");
+    //assertQueryEquals("foo \\|| bar", a, "foo \\|| bar");
+    //assertQueryEquals("foo \\AND bar", a, "foo \\AND bar");*/
+
+    assertQueryEquals("\\a", a, "a");
+    
+    assertQueryEquals("a\\-b:c", a, "a-b:c");
+    assertQueryEquals("a\\+b:c", a, "a+b:c");
+    assertQueryEquals("a\\:b:c", a, "a:b:c");
+    assertQueryEquals("a\\\\b:c", a, "a\\b:c");
+
+    assertQueryEquals("a:b\\-c", a, "a:b-c");
+    assertQueryEquals("a:b\\+c", a, "a:b+c");
+    assertQueryEquals("a:b\\:c", a, "a:b:c");
+    assertQueryEquals("a:b\\\\c", a, "a:b\\c");
+
+    assertQueryEquals("a:b\\-c*", a, "a:b-c*");
+    assertQueryEquals("a:b\\+c*", a, "a:b+c*");
+    assertQueryEquals("a:b\\:c*", a, "a:b:c*");
+
+    assertQueryEquals("a:b\\\\c*", a, "a:b\\c*");
+
+    assertQueryEquals("a:b\\-?c", a, "a:b-?c");
+    assertQueryEquals("a:b\\+?c", a, "a:b+?c");
+    assertQueryEquals("a:b\\:?c", a, "a:b:?c");
+
+    assertQueryEquals("a:b\\\\?c", a, "a:b\\?c");
+
+    assertQueryEquals("a:b\\-c~", a, "a:b-c~0.5");
+    assertQueryEquals("a:b\\+c~", a, "a:b+c~0.5");
+    assertQueryEquals("a:b\\:c~", a, "a:b:c~0.5");
+    assertQueryEquals("a:b\\\\c~", a, "a:b\\c~0.5");
+
+    assertQueryEquals("[ a\\- TO a\\+ ]", null, "[a- TO a+]");
+    assertQueryEquals("[ a\\: TO a\\~ ]", null, "[a: TO a~]");
+    assertQueryEquals("[ a\\\\ TO a\\* ]", null, "[a\\ TO a*]");
+
+    assertQueryEquals("[\"c\\:\\\\temp\\\\\\~foo0.txt\" TO \"c\\:\\\\temp\\\\\\~foo9.txt\"]", a, 
+                      "[c:\\temp\\~foo0.txt TO c:\\temp\\~foo9.txt]");
+    
+    assertQueryEquals("a\\\\\\+b", a, "a\\+b");
+    
+    assertQueryEquals("a \\\"b c\\\" d", a, "a \"b c\" d");
+    assertQueryEquals("\"a \\\"b c\\\" d\"", a, "\"a \"b c\" d\"");
+    assertQueryEquals("\"a \\+b c d\"", a, "\"a +b c d\"");
+    
+    assertQueryEquals("c\\:\\\\temp\\\\\\~foo.txt", a, "c:\\temp\\~foo.txt");
+    
+    assertParseException("XY\\"); // there must be a character after the escape char
+    
+    // test unicode escaping
+    assertQueryEquals("a\\u0062c", a, "abc");
+    assertQueryEquals("XY\\u005a", a, "XYZ");
+    assertQueryEquals("XY\\u005A", a, "XYZ");
+    assertQueryEquals("\"a \\\\\\u0028\\u0062\\\" c\"", a, "\"a \\(b\" c\"");
+    
+    assertParseException("XY\\u005G");  // test non-hex character in escaped unicode sequence
+    assertParseException("XY\\u005");   // test incomplete escaped unicode sequence
+    
+    // Tests bug LUCENE-800
+    assertQueryEquals("(item:\\\\ item:ABCD\\\\)", a, "item:\\ item:ABCD\\");
+    assertParseException("(item:\\\\ item:ABCD\\\\))"); // unmatched closing paranthesis 
+    assertQueryEquals("\\*", a, "*");
+    assertQueryEquals("\\\\", a, "\\");  // escaped backslash
+    
+    assertParseException("\\"); // a backslash must always be escaped
+    
+    // LUCENE-1189
+    assertQueryEquals("(\"a\\\\\") or (\"b\")", a ,"a\\ or b");
+  }
+
+  public void testQueryStringEscaping() throws Exception {
+    Analyzer a = new MockAnalyzer(random, MockTokenizer.WHITESPACE, false);
+
+    assertEscapedQueryEquals("a-b:c", a, "a\\-b\\:c");
+    assertEscapedQueryEquals("a+b:c", a, "a\\+b\\:c");
+    assertEscapedQueryEquals("a:b:c", a, "a\\:b\\:c");
+    assertEscapedQueryEquals("a\\b:c", a, "a\\\\b\\:c");
+
+    assertEscapedQueryEquals("a:b-c", a, "a\\:b\\-c");
+    assertEscapedQueryEquals("a:b+c", a, "a\\:b\\+c");
+    assertEscapedQueryEquals("a:b:c", a, "a\\:b\\:c");
+    assertEscapedQueryEquals("a:b\\c", a, "a\\:b\\\\c");
+
+    assertEscapedQueryEquals("a:b-c*", a, "a\\:b\\-c\\*");
+    assertEscapedQueryEquals("a:b+c*", a, "a\\:b\\+c\\*");
+    assertEscapedQueryEquals("a:b:c*", a, "a\\:b\\:c\\*");
+
+    assertEscapedQueryEquals("a:b\\\\c*", a, "a\\:b\\\\\\\\c\\*");
+
+    assertEscapedQueryEquals("a:b-?c", a, "a\\:b\\-\\?c");
+    assertEscapedQueryEquals("a:b+?c", a, "a\\:b\\+\\?c");
+    assertEscapedQueryEquals("a:b:?c", a, "a\\:b\\:\\?c");
+
+    assertEscapedQueryEquals("a:b?c", a, "a\\:b\\?c");
+
+    assertEscapedQueryEquals("a:b-c~", a, "a\\:b\\-c\\~");
+    assertEscapedQueryEquals("a:b+c~", a, "a\\:b\\+c\\~");
+    assertEscapedQueryEquals("a:b:c~", a, "a\\:b\\:c\\~");
+    assertEscapedQueryEquals("a:b\\c~", a, "a\\:b\\\\c\\~");
+
+    assertEscapedQueryEquals("[ a - TO a+ ]", null, "\\[ a \\- TO a\\+ \\]");
+    assertEscapedQueryEquals("[ a : TO a~ ]", null, "\\[ a \\: TO a\\~ \\]");
+    assertEscapedQueryEquals("[ a\\ TO a* ]", null, "\\[ a\\\\ TO a\\* \\]");
+    
+    // LUCENE-881
+    assertEscapedQueryEquals("|| abc ||", a, "\\|\\| abc \\|\\|");
+    assertEscapedQueryEquals("&& abc &&", a, "\\&\\& abc \\&\\&");
+  }
+  
+  public void testTabNewlineCarriageReturn()
+    throws Exception {
+    assertQueryEqualsDOA("+weltbank +worlbank", null,
+      "+weltbank +worlbank");
+
+    assertQueryEqualsDOA("+weltbank\n+worlbank", null,
+      "+weltbank +worlbank");
+    assertQueryEqualsDOA("weltbank \n+worlbank", null,
+      "+weltbank +worlbank");
+    assertQueryEqualsDOA("weltbank \n +worlbank", null,
+      "+weltbank +worlbank");
+
+    assertQueryEqualsDOA("+weltbank\r+worlbank", null,
+      "+weltbank +worlbank");
+    assertQueryEqualsDOA("weltbank \r+worlbank", null,
+      "+weltbank +worlbank");
+    assertQueryEqualsDOA("weltbank \r +worlbank", null,
+      "+weltbank +worlbank");
+
+    assertQueryEqualsDOA("+weltbank\r\n+worlbank", null,
+      "+weltbank +worlbank");
+    assertQueryEqualsDOA("weltbank \r\n+worlbank", null,
+      "+weltbank +worlbank");
+    assertQueryEqualsDOA("weltbank \r\n +worlbank", null,
+      "+weltbank +worlbank");
+    assertQueryEqualsDOA("weltbank \r \n +worlbank", null,
+      "+weltbank +worlbank");
+
+    assertQueryEqualsDOA("+weltbank\t+worlbank", null,
+      "+weltbank +worlbank");
+    assertQueryEqualsDOA("weltbank \t+worlbank", null,
+      "+weltbank +worlbank");
+    assertQueryEqualsDOA("weltbank \t +worlbank", null,
+      "+weltbank +worlbank");
+  }
+
+  public void testSimpleDAO()
+    throws Exception {
+    assertQueryEqualsDOA("term term term", null, "+term +term +term");
+    assertQueryEqualsDOA("term +term term", null, "+term +term +term");
+    assertQueryEqualsDOA("term term +term", null, "+term +term +term");
+    assertQueryEqualsDOA("term +term +term", null, "+term +term +term");
+    assertQueryEqualsDOA("-term term term", null, "-term +term +term");
+  }
+
+  public void testBoost()
+    throws Exception {
+    Set<Object> stopWords = new HashSet<Object>(1);
+    stopWords.add("on");
+    StandardAnalyzer oneStopAnalyzer = new StandardAnalyzer(TEST_VERSION_CURRENT, stopWords);
+    QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", oneStopAnalyzer);
+    Query q = qp.parse("on^1.0");
+    assertNotNull(q);
+    q = qp.parse("\"hello\"^2.0");
+    assertNotNull(q);
+    assertEquals(q.getBoost(), (float) 2.0, (float) 0.5);
+    q = qp.parse("hello^2.0");
+    assertNotNull(q);
+    assertEquals(q.getBoost(), (float) 2.0, (float) 0.5);
+    q = qp.parse("\"on\"^1.0");
+    assertNotNull(q);
+
+    QueryParser qp2 = new QueryParser(TEST_VERSION_CURRENT, "field", new StandardAnalyzer(TEST_VERSION_CURRENT));
+    q = qp2.parse("the^3");
+    // "the" is a stop word so the result is an empty query:
+    assertNotNull(q);
+    assertEquals("", q.toString());
+    assertEquals(1.0f, q.getBoost(), 0.01f);
+  }
+
+  public void assertParseException(String queryString) throws Exception {
+    try {
+      getQuery(queryString, null);
+    } catch (ParseException expected) {
+      return;
+    }
+    fail("ParseException expected, not thrown");
+  }
+       
+  public void testException() throws Exception {
+    assertParseException("\"some phrase");
+    assertParseException("(foo bar");
+    assertParseException("foo bar))");
+    assertParseException("field:term:with:colon some more terms");
+    assertParseException("(sub query)^5.0^2.0 plus more");
+    assertParseException("secret AND illegal) AND access:confidential");
+  }
+  
+
+  public void testCustomQueryParserWildcard() {
+    try {
+      new QPTestParser("contents", new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).parse("a?t");
+      fail("Wildcard queries should not be allowed");
+    } catch (ParseException expected) {
+      // expected exception
+    }
+  }
+
+  public void testCustomQueryParserFuzzy() throws Exception {
+    try {
+      new QPTestParser("contents", new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).parse("xunit~");
+      fail("Fuzzy queries should not be allowed");
+    } catch (ParseException expected) {
+      // expected exception
+    }
+  }
+
+  public void testBooleanQuery() throws Exception {
+    BooleanQuery.setMaxClauseCount(2);
+    try {
+      QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer(random, MockTokenizer.WHITESPACE, false));
+      qp.parse("one two three");
+      fail("ParseException expected due to too many boolean clauses");
+    } catch (ParseException expected) {
+      // too many boolean clauses, so ParseException is expected
+    }
+  }
+
+  /**
+   * This test differs from TestPrecedenceQueryParser
+   */
+  public void testPrecedence() throws Exception {
+    QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer(random, MockTokenizer.WHITESPACE, false));
+    Query query1 = qp.parse("A AND B OR C AND D");
+    Query query2 = qp.parse("+A +B +C +D");
+    assertEquals(query1, query2);
+  }
+
+  public void testLocalDateFormat() throws IOException, ParseException {
+    Directory ramDir = newDirectory();
+    IndexWriter iw = new IndexWriter(ramDir, newIndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)));
+
+    addDateDoc("a", 2005, 12, 2, 10, 15, 33, iw);
+    addDateDoc("b", 2005, 12, 4, 22, 15, 00, iw);
+    iw.close();
+    IndexSearcher is = new IndexSearcher(ramDir, true);
+    assertHits(1, "[12/1/2005 TO 12/3/2005]", is);
+    assertHits(2, "[12/1/2005 TO 12/4/2005]", is);
+    assertHits(1, "[12/3/2005 TO 12/4/2005]", is);
+    assertHits(1, "{12/1/2005 TO 12/3/2005}", is);
+    assertHits(1, "{12/1/2005 TO 12/4/2005}", is);
+    assertHits(0, "{12/3/2005 TO 12/4/2005}", is);
+    is.close();
+    ramDir.close();
+  }
+
+  public void testStarParsing() throws Exception {
+    final int[] type = new int[1];
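+    // type[0] records which overridden factory method fired: 1 = wildcard, 2 = prefix, 3 = field query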
+    QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)) {
+      @Override
+      protected Query getWildcardQuery(String field, String termStr) throws ParseException {
+        // override error checking of superclass
+        type[0]=1;
+        return new TermQuery(new Term(field,termStr));
+      }
+      @Override
+      protected Query getPrefixQuery(String field, String termStr) throws ParseException {
+        // override error checking of superclass
+        type[0]=2;        
+        return new TermQuery(new Term(field,termStr));
+      }
+
+      @Override
+      protected Query getFieldQuery(String field, String queryText, boolean quoted) throws ParseException {
+        type[0]=3;
+        return super.getFieldQuery(field, queryText, quoted);
+      }
+    };
+
+    TermQuery tq;
+
+    tq = (TermQuery)qp.parse("foo:zoo*");
+    assertEquals("zoo",tq.getTerm().text());
+    assertEquals(2,type[0]);
+
+    tq = (TermQuery)qp.parse("foo:zoo*^2");
+    assertEquals("zoo",tq.getTerm().text());
+    assertEquals(2,type[0]);
+    assertEquals(tq.getBoost(),2,0);
+
+    tq = (TermQuery)qp.parse("foo:*");
+    assertEquals("*",tq.getTerm().text());
+    assertEquals(1,type[0]);  // could be a valid prefix query in the future too
+
+    tq = (TermQuery)qp.parse("foo:*^2");
+    assertEquals("*",tq.getTerm().text());
+    assertEquals(1,type[0]);
+    assertEquals(tq.getBoost(),2,0);    
+
+    tq = (TermQuery)qp.parse("*:foo");
+    assertEquals("*",tq.getTerm().field());
+    assertEquals("foo",tq.getTerm().text());
+    assertEquals(3,type[0]);
+
+    tq = (TermQuery)qp.parse("*:*");
+    assertEquals("*",tq.getTerm().field());
+    assertEquals("*",tq.getTerm().text());
+    assertEquals(1,type[0]);  // could be handled as a prefix query in the future
+
+    tq = (TermQuery)qp.parse("(*:*)");
+    assertEquals("*",tq.getTerm().field());
+    assertEquals("*",tq.getTerm().text());
+    assertEquals(1,type[0]);
+
+  }
+
+  public void testStopwords() throws Exception {
+    QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "a", new StopAnalyzer(TEST_VERSION_CURRENT, StopFilter.makeStopSet(TEST_VERSION_CURRENT, "the", "foo")));
+    Query result = qp.parse("a:the OR a:foo");
+    assertNotNull("result is null and it shouldn't be", result);
+    assertTrue("result is not a BooleanQuery", result instanceof BooleanQuery);
+    assertTrue(((BooleanQuery) result).clauses().size() + " does not equal: " + 0, ((BooleanQuery) result).clauses().size() == 0);
+    result = qp.parse("a:woo OR a:the");
+    assertNotNull("result is null and it shouldn't be", result);
+    assertTrue("result is not a TermQuery", result instanceof TermQuery);
+    result = qp.parse("(fieldX:xxxxx OR fieldy:xxxxxxxx)^2 AND (fieldx:the OR fieldy:foo)");
+    assertNotNull("result is null and it shouldn't be", result);
+    assertTrue("result is not a BooleanQuery", result instanceof BooleanQuery);
+    if (VERBOSE) System.out.println("Result: " + result);
+    assertTrue(((BooleanQuery) result).clauses().size() + " does not equal: " + 2, ((BooleanQuery) result).clauses().size() == 2);
+  }
+
+  public void testPositionIncrement() throws Exception {
+    QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "a", new StopAnalyzer(TEST_VERSION_CURRENT, StopFilter.makeStopSet(TEST_VERSION_CURRENT, "the", "in", "are", "this")));
+    qp.setEnablePositionIncrements(true);
+    String qtxt = "\"the words in poisitions pos02578 are stopped in this phrasequery\"";
+    //               0         2                      5           7  8
+    int expectedPositions[] = {1,3,4,6,9};
+    PhraseQuery pq = (PhraseQuery) qp.parse(qtxt);
+    //System.out.println("Query text: "+qtxt);
+    //System.out.println("Result: "+pq);
+    Term t[] = pq.getTerms();
+    int pos[] = pq.getPositions();
+    for (int i = 0; i < t.length; i++) {
+      //System.out.println(i+". "+t[i]+"  pos: "+pos[i]);
+      assertEquals("term "+i+" = "+t[i]+" has wrong term-position!",expectedPositions[i],pos[i]);
+    }
+  }
+
+  public void testMatchAllDocs() throws Exception {
+    QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer(random, MockTokenizer.WHITESPACE, false));
+    assertEquals(new MatchAllDocsQuery(), qp.parse("*:*"));
+    assertEquals(new MatchAllDocsQuery(), qp.parse("(*:*)"));
+    BooleanQuery bq = (BooleanQuery)qp.parse("+*:* -*:*");
+    assertTrue(bq.getClauses()[0].getQuery() instanceof MatchAllDocsQuery);
+    assertTrue(bq.getClauses()[1].getQuery() instanceof MatchAllDocsQuery);
+  }
+  
+  private void assertHits(int expected, String query, IndexSearcher is) throws ParseException, IOException {
+    QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "date", new MockAnalyzer(random, MockTokenizer.WHITESPACE, false));
+    qp.setLocale(Locale.ENGLISH);
+    Query q = qp.parse(query);
+    ScoreDoc[] hits = is.search(q, null, 1000).scoreDocs;
+    assertEquals(expected, hits.length);
+  }
+
+  private void addDateDoc(String content, int year, int month,
+      int day, int hour, int minute, int second, IndexWriter iw) throws IOException {
+    Document d = new Document();
+    d.add(newField("f", content, Field.Store.YES, Field.Index.ANALYZED));
+    Calendar cal = Calendar.getInstance(Locale.ENGLISH);
+    cal.set(year, month-1, day, hour, minute, second);
+    d.add(newField("date", DateField.dateToString(cal.getTime()), Field.Store.YES, Field.Index.NOT_ANALYZED));
+    iw.addDocument(d);
+  }
+
+  @Override
+  public void tearDown() throws Exception {
+    BooleanQuery.setMaxClauseCount(originalMaxClauses);
+    super.tearDown();
+  }
+
+  // LUCENE-2002: make sure defaults for StandardAnalyzer's
+  // enableStopPositionIncr & QueryParser's enablePosIncr
+  // "match"
+  public void testPositionIncrements() throws Exception {
+    Directory dir = newDirectory();
+    Analyzer a = new StandardAnalyzer(TEST_VERSION_CURRENT);
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, a));
+    Document doc = new Document();
+    doc.add(newField("f", "the wizard of ozzy", Field.Store.NO, Field.Index.ANALYZED));
+    w.addDocument(doc);
+    IndexReader r = IndexReader.open(w, true);
+    w.close();
+    IndexSearcher s = newSearcher(r);
+    QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "f", a);
+    Query q = qp.parse("\"wizard of ozzy\"");
+    assertEquals(1, s.search(q, 1).totalHits);
+    s.close();
+    r.close();
+    dir.close();
+  }
+
+  // LUCENE-2002: when we run javacc to regen QueryParser,
+  // we also run a replaceregexp step to fix 2 of the public
+  // ctors (change them to protected):
+  //
+  //   protected QueryParser(CharStream stream)
+  //
+  //   protected QueryParser(QueryParserTokenManager tm)
+  //
+  // This test is here as a safety, in case that ant step
+  // doesn't work for some reason.
+  public void testProtectedCtors() throws Exception {
+    try {
+      QueryParser.class.getConstructor(new Class[] {CharStream.class});
+      fail("please switch public QueryParser(CharStream) to be protected");
+    } catch (NoSuchMethodException nsme) {
+      // expected
+    }
+    try {
+      QueryParser.class.getConstructor(new Class[] {QueryParserTokenManager.class});
+      fail("please switch public QueryParser(QueryParserTokenManager) to be protected");
+    } catch (NoSuchMethodException nsme) {
+      // expected
+    }
+  }
+}
diff --git a/lucene/backwards/src/test-framework/org/apache/lucene/search/AssertingIndexSearcher.java b/lucene/backwards/src/test-framework/org/apache/lucene/search/AssertingIndexSearcher.java
new file mode 100644
index 0000000..c347eb7
--- /dev/null
+++ b/lucene/backwards/src/test-framework/org/apache/lucene/search/AssertingIndexSearcher.java
@@ -0,0 +1,89 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.concurrent.ExecutorService;
+import java.io.IOException;
+
+import org.apache.lucene.index.IndexReader;
+
+/** 
+ * Helper class that adds some extra checks to ensure correct
+ * usage of {@code IndexSearcher} and {@code Weight}.
+ * TODO: Extend this with more checks; this is just a start.
+ */
+public class AssertingIndexSearcher extends IndexSearcher {
+  public  AssertingIndexSearcher(IndexReader r) {
+    super(r);
+  }
+  
+  public  AssertingIndexSearcher(IndexReader r, ExecutorService ex) {
+    super(r, ex);
+  }
+  
+  // not anonymous because otherwise it is not serializable (compare trunk)
+  private static final class UnmodifiableWeight extends Weight {
+    private final Weight w;
+    
+    UnmodifiableWeight(Weight w) {
+      this.w = w;
+    }
+  
+    @Override
+    public Explanation explain(IndexReader reader, int doc) throws IOException {
+      return w.explain(reader, doc);
+    }
+
+    @Override
+    public Query getQuery() {
+      return w.getQuery();
+    }
+
+    @Override
+    public float getValue() {
+      return w.getValue();
+    }
+
+    @Override
+    public void normalize(float norm) {
+      throw new IllegalStateException("Weight already normalized.");
+    }
+
+    @Override
+    public Scorer scorer(IndexReader reader, boolean scoreDocsInOrder, boolean topScorer) throws IOException {
+      return w.scorer(reader, scoreDocsInOrder, topScorer);
+    }
+
+    @Override
+    public float sumOfSquaredWeights() throws IOException {
+      throw new IllegalStateException("Weight already normalized.");
+    }
+
+    @Override
+    public boolean scoresDocsOutOfOrder() {
+      return w.scoresDocsOutOfOrder();
+    }
+  }
+  
+  /** Ensures that the returned {@code Weight} is not normalized again, which may produce wrong scores. */
+  @Override
+  public Weight createNormalizedWeight(Query query) throws IOException {
+    final Weight w = super.createNormalizedWeight(query);
+    return new UnmodifiableWeight(w);
+  }
+}
diff --git a/lucene/backwards/src/test-framework/org/apache/lucene/search/CachingWrapperFilterHelper.java b/lucene/backwards/src/test-framework/org/apache/lucene/search/CachingWrapperFilterHelper.java
new file mode 100644
index 0000000..80df572
--- /dev/null
+++ b/lucene/backwards/src/test-framework/org/apache/lucene/search/CachingWrapperFilterHelper.java
@@ -0,0 +1,73 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import junit.framework.Assert;
+import org.apache.lucene.index.IndexReader;
+
+/**
+ * A unit test helper class to test when the filter is getting cached and when it is not.
+ */
+public class CachingWrapperFilterHelper extends CachingWrapperFilter {
+  
+  private boolean shouldHaveCache = false;
+
+  /**
+   * @param filter Filter to cache results of
+   */
+  public CachingWrapperFilterHelper(Filter filter) {
+    super(filter);
+  }
+  
+  public void setShouldHaveCache(boolean shouldHaveCache) {
+    this.shouldHaveCache = shouldHaveCache;
+  }
+  
+  @Override
+  public synchronized DocIdSet getDocIdSet(IndexReader reader) throws IOException {
+
+    final int saveMissCount = missCount;
+    DocIdSet docIdSet = super.getDocIdSet(reader);
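+    // a cache hit leaves missCount unchanged; a cache miss increments it in the superclass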
+
+    if (shouldHaveCache) {
+      Assert.assertEquals("Cache should have data ", saveMissCount, missCount);
+    } else {
+      Assert.assertTrue("Cache should be null " + docIdSet, missCount > saveMissCount);
+    }
+
+    return docIdSet;
+  }
+
+  @Override
+  public String toString() {
+    return "CachingWrapperFilterHelper("+filter+")";
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (!(o instanceof CachingWrapperFilterHelper)) return false;
+    return this.filter.equals(o);
+  }
+  
+  @Override
+  public int hashCode() {
+    return this.filter.hashCode() ^ 0x5525aacb;
+  }
+}
diff --git a/lucene/backwards/src/test-framework/org/apache/lucene/search/CheckHits.java b/lucene/backwards/src/test-framework/org/apache/lucene/search/CheckHits.java
new file mode 100644
index 0000000..858d84d
--- /dev/null
+++ b/lucene/backwards/src/test-framework/org/apache/lucene/search/CheckHits.java
@@ -0,0 +1,517 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.Set;
+import java.util.TreeSet;
+import java.util.Random;
+
+import junit.framework.Assert;
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.store.Directory;
+
+public class CheckHits {
+  
+  /**
+   * Some explain methods calculate their values through a slightly
+   * different order of operations from the actual scoring method ...
+   * this allows for a small amount of variation
+   */
+  public static float EXPLAIN_SCORE_TOLERANCE_DELTA = 0.0002f;
+    
+  /**
+   * Tests that all documents up to maxDoc which are *not* in the
+   * expected result set have an explanation which indicates that 
+   * the document does not match
+   */
+  public static void checkNoMatchExplanations(Query q, String defaultFieldName,
+                                              Searcher searcher, int[] results)
+    throws IOException {
+
+    String d = q.toString(defaultFieldName);
+    Set<Integer> ignore = new TreeSet<Integer>();
+    for (int i = 0; i < results.length; i++) {
+      ignore.add(Integer.valueOf(results[i]));
+    }
+    
+    int maxDoc = searcher.maxDoc();
+    for (int doc = 0; doc < maxDoc; doc++) {
+      if (ignore.contains(Integer.valueOf(doc))) continue;
+
+      Explanation exp = searcher.explain(q, doc);
+      Assert.assertNotNull("Explanation of [["+d+"]] for #"+doc+" is null",
+                             exp);
+      Assert.assertFalse("Explanation of [["+d+"]] for #"+doc+
+                         " doesn't indicate non-match: " + exp.toString(),
+                         exp.isMatch());
+    }
+    
+  }
+  
+  /**
+   * Tests that a query matches an expected set of documents using a
+   * HitCollector.
+   *
+   * <p>
+   * Note that when using the HitCollector API, documents will be collected
+   * if they "match" regardless of what their score is.
+   * </p>
+   * @param query the query to test
+   * @param searcher the searcher to test the query against
+   * @param defaultFieldName used for displaying the query in assertion messages
+   * @param results a list of documentIds that must match the query
+   * @see Searcher#search(Query,Collector)
+   * @see #checkHits
+   */
+  public static void checkHitCollector(Random random, Query query, String defaultFieldName,
+                                       Searcher searcher, int[] results)
+    throws IOException {
+
+    QueryUtils.check(random,query,searcher);
+    
+    Set<Integer> correct = new TreeSet<Integer>();
+    for (int i = 0; i < results.length; i++) {
+      correct.add(Integer.valueOf(results[i]));
+    }
+    final Set<Integer> actual = new TreeSet<Integer>();
+    final Collector c = new SetCollector(actual);
+
+    searcher.search(query, c);
+    Assert.assertEquals("Simple: " + query.toString(defaultFieldName), 
+                        correct, actual);
+
+    for (int i = -1; i < 2; i++) {
+      actual.clear();
+      QueryUtils.wrapSearcher(random, searcher, i).search(query, c);
+      Assert.assertEquals("Wrap Searcher " + i + ": " +
+                          query.toString(defaultFieldName),
+                          correct, actual);
+    }
+                        
+    if ( ! ( searcher instanceof IndexSearcher ) ) return;
+
+    for (int i = -1; i < 2; i++) {
+      actual.clear();
+      QueryUtils.wrapUnderlyingReader
+        (random, (IndexSearcher)searcher, i).search(query, c);
+      Assert.assertEquals("Wrap Reader " + i + ": " +
+                          query.toString(defaultFieldName),
+                          correct, actual);
+    }
+  }
+
+  public static class SetCollector extends Collector {
+    final Set<Integer> bag;
+    public SetCollector(Set<Integer> bag) {
+      this.bag = bag;
+    }
+    private int base = 0;
+    @Override
+    public void setScorer(Scorer scorer) throws IOException {}
+    @Override
+    public void collect(int doc) {
+      bag.add(Integer.valueOf(doc + base));
+    }
+    @Override
+    public void setNextReader(IndexReader reader, int docBase) {
+      base = docBase;
+    }
+    @Override
+    public boolean acceptsDocsOutOfOrder() {
+      return true;
+    }
+  }
+
+  /**
+   * Tests that a query matches an expected set of documents using Hits.
+   *
+   * <p>
+   * Note that when using the Hits API, documents will only be returned
+   * if they have a positive normalized score.
+   * </p>
+   * @param query the query to test
+   * @param searcher the searcher to test the query against
+   * @param defaultFieldName used for displaying the query in assertion messages
+   * @param results a list of documentIds that must match the query
+   * @see Searcher#search(Query, int)
+   * @see #checkHitCollector
+   */
+  public static void checkHits(
+        Random random,
+        Query query,
+        String defaultFieldName,
+        Searcher searcher,
+        int[] results)
+          throws IOException {
+
+    ScoreDoc[] hits = searcher.search(query, 1000).scoreDocs;
+
+    Set<Integer> correct = new TreeSet<Integer>();
+    for (int i = 0; i < results.length; i++) {
+      correct.add(Integer.valueOf(results[i]));
+    }
+
+    Set<Integer> actual = new TreeSet<Integer>();
+    for (int i = 0; i < hits.length; i++) {
+      actual.add(Integer.valueOf(hits[i].doc));
+    }
+
+    Assert.assertEquals(query.toString(defaultFieldName), correct, actual);
+
+    QueryUtils.check(random, query,searcher);
+  }
+
+  /** Tests that a Hits has an expected order of documents */
+  public static void checkDocIds(String mes, int[] results, ScoreDoc[] hits)
+  throws IOException {
+    Assert.assertEquals(mes + " nr of hits", hits.length, results.length);
+    for (int i = 0; i < results.length; i++) {
+      Assert.assertEquals(mes + " doc nrs for hit " + i, results[i], hits[i].doc);
+    }
+  }
+
+  /** Tests that two queries have an expected order of documents,
+   * and that the two queries have the same score values.
+   */
+  public static void checkHitsQuery(
+        Query query,
+        ScoreDoc[] hits1,
+        ScoreDoc[] hits2,
+        int[] results)
+          throws IOException {
+
+    checkDocIds("hits1", results, hits1);
+    checkDocIds("hits2", results, hits2);
+    checkEqual(query, hits1, hits2);
+  }
+
+  public static void checkEqual(Query query, ScoreDoc[] hits1, ScoreDoc[] hits2) throws IOException {
+     final float scoreTolerance = 1.0e-6f;
+     if (hits1.length != hits2.length) {
+       Assert.fail("Unequal lengths: hits1="+hits1.length+",hits2="+hits2.length);
+     }
+    for (int i = 0; i < hits1.length; i++) {
+      if (hits1[i].doc != hits2[i].doc) {
+        Assert.fail("Hit " + i + " docnumbers don't match\n"
+                + hits2str(hits1, hits2,0,0)
+                + "for query:" + query.toString());
+      }
+
+      if ((hits1[i].doc != hits2[i].doc)
+          || Math.abs(hits1[i].score -  hits2[i].score) > scoreTolerance)
+      {
+        Assert.fail("Hit " + i + ", doc nrs " + hits1[i].doc + " and " + hits2[i].doc
+                      + "\nunequal       : " + hits1[i].score
+                      + "\n           and: " + hits2[i].score
+                      + "\nfor query:" + query.toString());
+      }
+    }
+  }
+
+  public static String hits2str(ScoreDoc[] hits1, ScoreDoc[] hits2, int start, int end) throws IOException {
+    StringBuilder sb = new StringBuilder();
+    int len1=hits1==null ? 0 : hits1.length;
+    int len2=hits2==null ? 0 : hits2.length;
+    if (end<=0) {
+      end = Math.max(len1,len2);
+    }
+
+      sb.append("Hits length1=").append(len1).append("\tlength2=").append(len2);
+
+    sb.append('\n');
+    for (int i=start; i<end; i++) {
+        sb.append("hit=").append(i).append(':');
+      if (i<len1) {
+          sb.append(" doc").append(hits1[i].doc).append('=').append(hits1[i].score);
+      } else {
+        sb.append("               ");
+      }
+      sb.append(",\t");
+      if (i<len2) {
+        sb.append(" doc").append(hits2[i].doc).append('=').append(hits2[i].score);
+      }
+      sb.append('\n');
+    }
+    return sb.toString();
+  }
+
+
+  public static String topdocsString(TopDocs docs, int start, int end) {
+    StringBuilder sb = new StringBuilder();
+      sb.append("TopDocs totalHits=").append(docs.totalHits).append(" top=").append(docs.scoreDocs.length).append('\n');
+    if (end<=0) end=docs.scoreDocs.length;
+    else end=Math.min(end,docs.scoreDocs.length);
+    for (int i=start; i<end; i++) {
+      sb.append('\t');
+      sb.append(i);
+      sb.append(") doc=");
+      sb.append(docs.scoreDocs[i].doc);
+      sb.append("\tscore=");
+      sb.append(docs.scoreDocs[i].score);
+      sb.append('\n');
+    }
+    return sb.toString();
+  }
+
+  /**
+   * Asserts that the explanation value for every document matching a
+   * query corresponds with the true score. 
+   *
+   * @see ExplanationAsserter
+   * @see #checkExplanations(Query, String, Searcher, boolean) for
+   * "deep" testing of the explanation details.
+   *   
+   * @param query the query to test
+   * @param searcher the searcher to test the query against
+   * @param defaultFieldName used for displaying the query in assertion messages
+   */
+  public static void checkExplanations(Query query,
+                                       String defaultFieldName,
+                                       Searcher searcher) throws IOException {
+    checkExplanations(query, defaultFieldName, searcher, false);
+  }
+
+  /**
+   * Asserts that the explanation value for every document matching a
+   * query corresponds with the true score.  Optionally does "deep" 
+   * testing of the explanation details.
+   *
+   * @see ExplanationAsserter
+   * @param query the query to test
+   * @param searcher the searcher to test the query against
+   * @param defaultFieldName used for displaying the query in assertion messages
+   * @param deep indicates whether a deep comparison of sub-Explanation details should be executed
+   */
+  public static void checkExplanations(Query query,
+                                       String defaultFieldName,
+                                       Searcher searcher, 
+                                       boolean deep) throws IOException {
+
+    searcher.search(query,
+                    new ExplanationAsserter
+                    (query, defaultFieldName, searcher, deep));
+
+  }
+
+  /** 
+   * Assert that an explanation has the expected score, and optionally that its
+   * sub-details (max/sum/factor) match that score.
+   *
+   * @param q String representation of the query for assertion messages
+   * @param doc Document ID for assertion messages
+   * @param score Real score value of doc with query q
+   * @param deep indicates whether a deep comparison of sub-Explanation details should be executed
+   * @param expl The Explanation to match against score
+   */
+  public static void verifyExplanation(String q, 
+                                       int doc, 
+                                       float score,
+                                       boolean deep,
+                                       Explanation expl) {
+    float value = expl.getValue();
+    Assert.assertEquals(q+": score(doc="+doc+")="+score+
+        " != explanationScore="+value+" Explanation: "+expl,
+        score,value,EXPLAIN_SCORE_TOLERANCE_DELTA);
+
+    if (!deep) return;
+
+    Explanation detail[] = expl.getDetails();
+    if (detail!=null) {
+      if (detail.length==1) {
+        // simple containment, no matter what the description says, 
+        // just verify contained expl has same score
+        verifyExplanation(q,doc,score,deep,detail[0]);
+      } else {
+        // explanation must either:
+        // - end with one of: "product of:", "sum of:", "max of:", or
+        // - have "max plus <x> times others" (where <x> is float).
+        float x = 0;
+        String descr = expl.getDescription().toLowerCase();
+        boolean productOf = descr.endsWith("product of:");
+        boolean sumOf = descr.endsWith("sum of:");
+        boolean maxOf = descr.endsWith("max of:");
+        boolean maxTimesOthers = false;
+        if (!(productOf || sumOf || maxOf)) {
+          // maybe 'max plus x times others'
+          int k1 = descr.indexOf("max plus ");
+          if (k1>=0) {
+            k1 += "max plus ".length();
+            int k2 = descr.indexOf(" ",k1);
+            try {
+              x = Float.parseFloat(descr.substring(k1,k2).trim());
+              if (descr.substring(k2).trim().equals("times others of:")) {
+                maxTimesOthers = true;
+              }
+            } catch (NumberFormatException e) {
+            }
+          }
+        }
+        Assert.assertTrue(
+            q+": multi valued explanation description=\""+descr
+            +"\" must be 'max of plus x times others' or end with 'product of'"
+            +" or 'sum of:' or 'max of:' - "+expl,
+            productOf || sumOf || maxOf || maxTimesOthers);
+        float sum = 0;
+        float product = 1;
+        float max = 0;
+        for (int i=0; i<detail.length; i++) {
+          float dval = detail[i].getValue();
+          verifyExplanation(q,doc,dval,deep,detail[i]);
+          product *= dval;
+          sum += dval;
+          max = Math.max(max,dval);
+        }
+        float combined = 0;
+        if (productOf) {
+          combined = product;
+        } else if (sumOf) {
+          combined = sum;
+        } else if (maxOf) {
+          combined = max;
+        } else if (maxTimesOthers) {
+          combined = max + x * (sum - max);
+        } else {
+            Assert.assertTrue("should never get here!",false);
+        }
+        Assert.assertEquals(q+": actual subDetails combined=="+combined+
+            " != value="+value+" Explanation: "+expl,
+            combined,value,EXPLAIN_SCORE_TOLERANCE_DELTA);
+      }
+    }
+  }
+
+  /**
+   * An IndexSearcher that implicitly checks the explanation of every match
+   * whenever it executes a search.
+   *
+   * @see ExplanationAsserter
+   */
+  public static class ExplanationAssertingSearcher extends IndexSearcher {
+    public ExplanationAssertingSearcher(Directory d) throws IOException {
+      super(d, true);
+    }
+    public ExplanationAssertingSearcher(IndexReader r) throws IOException {
+      super(r);
+    }
+    protected void checkExplanations(Query q) throws IOException {
+      super.search(q, null,
+                   new ExplanationAsserter
+                   (q, null, this));
+    }
+    @Override
+    public TopFieldDocs search(Query query,
+                               Filter filter,
+                               int n,
+                               Sort sort) throws IOException {
+      
+      checkExplanations(query);
+      return super.search(query,filter,n,sort);
+    }
+    @Override
+    public void search(Query query, Collector results) throws IOException {
+      checkExplanations(query);
+      super.search(query, results);
+    }
+    @Override
+    public void search(Query query, Filter filter, Collector results) throws IOException {
+      checkExplanations(query);
+      super.search(query, filter, results);
+    }
+    @Override
+    public TopDocs search(Query query, Filter filter,
+                          int n) throws IOException {
+
+      checkExplanations(query);
+      return super.search(query,filter, n);
+    }
+  }
+    
+  /**
+   * Asserts that the score explanation for every document matching a
+   * query corresponds with the true score.
+   *
+   * NOTE: this HitCollector should only be used with the Query and Searcher
+   * specified when it is constructed.
+   *
+   * @see CheckHits#verifyExplanation
+   */
+  public static class ExplanationAsserter extends Collector {
+
+    /**
+     * @deprecated
+     * @see CheckHits#EXPLAIN_SCORE_TOLERANCE_DELTA
+     */
+    @Deprecated
+    public static float SCORE_TOLERANCE_DELTA = 0.00005f;
+
+    Query q;
+    Searcher s;
+    String d;
+    boolean deep;
+    
+    Scorer scorer;
+    private int base = 0;
+
+    /** Constructs an instance which does shallow tests on the Explanation */
+    public ExplanationAsserter(Query q, String defaultFieldName, Searcher s) {
+      this(q,defaultFieldName,s,false);
+    }      
+    public ExplanationAsserter(Query q, String defaultFieldName, Searcher s, boolean deep) {
+      this.q=q;
+      this.s=s;
+      this.d = q.toString(defaultFieldName);
+      this.deep=deep;
+    }      
+    
+    @Override
+    public void setScorer(Scorer scorer) throws IOException {
+      this.scorer = scorer;     
+    }
+    
+    @Override
+    public void collect(int doc) throws IOException {
+      Explanation exp = null;
+      doc = doc + base;
+      try {
+        exp = s.explain(q, doc);
+      } catch (IOException e) {
+        throw new RuntimeException
+          ("exception in hitcollector of [["+d+"]] for #"+doc, e);
+      }
+      
+      Assert.assertNotNull("Explanation of [["+d+"]] for #"+doc+" is null", exp);
+      verifyExplanation(d,doc,scorer.score(),deep,exp);
+      Assert.assertTrue("Explanation of [["+d+"]] for #"+ doc + 
+                        " does not indicate match: " + exp.toString(), 
+                        exp.isMatch());
+    }
+    @Override
+    public void setNextReader(IndexReader reader, int docBase) {
+      base = docBase;
+    }
+    @Override
+    public boolean acceptsDocsOutOfOrder() {
+      return true;
+    }
+  }
+
+}
+
+
diff --git a/lucene/backwards/src/test-framework/org/apache/lucene/search/QueryUtils.java b/lucene/backwards/src/test-framework/org/apache/lucene/search/QueryUtils.java
new file mode 100644
index 0000000..0cf9016
--- /dev/null
+++ b/lucene/backwards/src/test-framework/org/apache/lucene/search/QueryUtils.java
@@ -0,0 +1,461 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutputStream;
+import java.util.Random;
+
+import junit.framework.Assert;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.MultiReader;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.MockDirectoryWrapper;
+import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util._TestUtil;
+
+import static org.apache.lucene.util.LuceneTestCase.TEST_VERSION_CURRENT;
+
+
+
+
+public class QueryUtils {
+
+  /** Check the types of things query objects should be able to do. */
+  public static void check(Query q) {
+    checkHashEquals(q);
+  }
+
+  /** check very basic hashCode and equals */
+  public static void checkHashEquals(Query q) {
+    Query q2 = (Query)q.clone();
+    checkEqual(q,q2);
+
+    Query q3 = (Query)q.clone();
+    q3.setBoost(7.21792348f);
+    checkUnequal(q,q3);
+
+    // test that a class check is done so that no exception is thrown
+    // in the implementation of equals()
+    Query whacky = new Query() {
+      @Override
+      public String toString(String field) {
+        return "My Whacky Query";
+      }
+    };
+    whacky.setBoost(q.getBoost());
+    checkUnequal(q, whacky);
+    
+    // null test
+    Assert.assertFalse(q.equals(null));
+  }
+
+  public static void checkEqual(Query q1, Query q2) {
+    Assert.assertEquals(q1, q2);
+    Assert.assertEquals(q1.hashCode(), q2.hashCode());
+  }
+
+  public static void checkUnequal(Query q1, Query q2) {
+    Assert.assertTrue(!q1.equals(q2));
+    Assert.assertTrue(!q2.equals(q1));
+
+    // possible this test can fail on a hash collision... if that
+    // happens, please change test to use a different example.
+    Assert.assertTrue(q1.hashCode() != q2.hashCode());
+  }
+  
+  /** deep check that explanations of a query 'score' correctly */
+  public static void checkExplanations (final Query q, final Searcher s) throws IOException {
+    CheckHits.checkExplanations(q, null, s, true);
+  }
+  
+  /** 
+   * Various query sanity checks on a searcher; some checks are only done when
+   * the searcher is an IndexSearcher.
+   *
+   * @see #check(Query)
+   * @see #checkFirstSkipTo
+   * @see #checkSkipTo
+   * @see #checkExplanations
+   * @see #checkSerialization
+   * @see #checkEqual
+   */
+  public static void check(Random random, Query q1, Searcher s) {
+    check(random, q1, s, true);
+  }
+  private static void check(Random random, Query q1, Searcher s, boolean wrap) {
+    try {
+      check(q1);
+      if (s!=null) {
+        if (s instanceof IndexSearcher) {
+          IndexSearcher is = (IndexSearcher)s;
+          checkFirstSkipTo(q1,is);
+          checkSkipTo(q1,is);
+          if (wrap) {
+            check(random, q1, wrapUnderlyingReader(random, is, -1), false);
+            check(random, q1, wrapUnderlyingReader(random, is,  0), false);
+            check(random, q1, wrapUnderlyingReader(random, is, +1), false);
+          }
+        }
+        if (wrap) {
+          check(random,q1, wrapSearcher(random, s, -1), false);
+          check(random,q1, wrapSearcher(random, s,  0), false);
+          check(random,q1, wrapSearcher(random, s, +1), false);
+        }
+        checkExplanations(q1,s);
+        checkSerialization(q1,s);
+        
+        Query q2 = (Query)q1.clone();
+        checkEqual(s.rewrite(q1),
+                   s.rewrite(q2));
+      }
+    } catch (IOException e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  /**
+   * Given an IndexSearcher, returns a new IndexSearcher whose IndexReader 
+   * is a MultiReader containing the Reader of the original IndexSearcher, 
+   * as well as several "empty" IndexReaders -- some of which will have 
+   * deleted documents in them.  This new IndexSearcher should 
+   * behave exactly the same as the original IndexSearcher.
+   * @param s the searcher to wrap
+   * @param edge if negative, s will be the first sub; if 0, s will be in the middle, if positive s will be the last sub
+   */
+  public static IndexSearcher wrapUnderlyingReader(Random random, final IndexSearcher s, final int edge) 
+    throws IOException {
+
+    IndexReader r = s.getIndexReader();
+
+    // we can't put deleted docs before the nested reader, because
+    // it will throw off the docIds
+    IndexReader[] readers = new IndexReader[] {
+      edge < 0 ? r : IndexReader.open(makeEmptyIndex(random, 0), true),
+      IndexReader.open(makeEmptyIndex(random, 0), true),
+      new MultiReader(new IndexReader[] {
+        IndexReader.open(makeEmptyIndex(random, edge < 0 ? 4 : 0), true),
+        IndexReader.open(makeEmptyIndex(random, 0), true),
+        0 == edge ? r : IndexReader.open(makeEmptyIndex(random, 0), true)
+      }),
+      IndexReader.open(makeEmptyIndex(random, 0 < edge ? 0 : 7), true),
+      IndexReader.open(makeEmptyIndex(random, 0), true),
+      new MultiReader(new IndexReader[] {
+        IndexReader.open(makeEmptyIndex(random, 0 < edge ? 0 : 5), true),
+        IndexReader.open(makeEmptyIndex(random, 0), true),
+        0 < edge ? r : IndexReader.open(makeEmptyIndex(random, 0), true)
+      })
+    };
+    IndexSearcher out = new IndexSearcher(new MultiReader(readers));
+    out.setSimilarity(s.getSimilarity());
+    return out;
+  }
+  /**
+   * Given a Searcher, returns a new MultiSearcher wrapping the
+   * original Searcher, 
+   * as well as several "empty" IndexSearchers -- some of which will have
+   * deleted documents in them.  This new MultiSearcher 
+   * should behave exactly the same as the original Searcher.
+   * @param s the Searcher to wrap
+   * @param edge if negative, s will be the first sub; if 0, s will be in the middle, if positive s will be the last sub
+   */
+  public static MultiSearcher wrapSearcher(Random random, final Searcher s, final int edge) 
+    throws IOException {
+
+    // we can't put deleted docs before the nested reader, because
+    // it will throw off the docIds
+    Searcher[] searchers = new Searcher[] {
+      edge < 0 ? s : new IndexSearcher(makeEmptyIndex(random, 0), true),
+      new MultiSearcher(new Searcher[] {
+        new IndexSearcher(makeEmptyIndex(random, edge < 0 ? 65 : 0), true),
+        new IndexSearcher(makeEmptyIndex(random, 0), true),
+        0 == edge ? s : new IndexSearcher(makeEmptyIndex(random, 0), true)
+      }),
+      new IndexSearcher(makeEmptyIndex(random, 0 < edge ? 0 : 3), true),
+      new IndexSearcher(makeEmptyIndex(random, 0), true),
+      new MultiSearcher(new Searcher[] {
+        new IndexSearcher(makeEmptyIndex(random, 0 < edge ? 0 : 5), true),
+        new IndexSearcher(makeEmptyIndex(random, 0), true),
+        0 < edge ? s : new IndexSearcher(makeEmptyIndex(random, 0), true)
+      })
+    };
+    MultiSearcher out = new MultiSearcher(searchers);
+    out.setSimilarity(s.getSimilarity());
+    return out;
+  }
+
+  private static Directory makeEmptyIndex(Random random, final int numDeletedDocs) 
+    throws IOException {
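+    // builds an index containing only deleted documents: add numDeletedDocs empty docs,
+    // delete them all, and keep the fully deleted segments so maxDoc() still reflects them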
+    Directory d = new MockDirectoryWrapper(random, new RAMDirectory());
+      IndexWriter w = new IndexWriter(d, new IndexWriterConfig(
+        TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+
+      for (int i = 0; i < numDeletedDocs; i++) {
+        w.addDocument(new Document());
+      }
+      w.commit();
+      w.deleteDocuments( new MatchAllDocsQuery() );
+      _TestUtil.keepFullyDeletedSegments(w);
+      w.commit();
+
+      if (0 < numDeletedDocs)
+        Assert.assertTrue("writer has no deletions", w.hasDeletions());
+
+      Assert.assertEquals("writer is missing some deleted docs", 
+                          numDeletedDocs, w.maxDoc());
+      Assert.assertEquals("writer has non-deleted docs", 
+                          0, w.numDocs());
+      w.close();
+      IndexReader r = IndexReader.open(d, true);
+      Assert.assertEquals("reader has wrong number of deleted docs", 
+                          numDeletedDocs, r.numDeletedDocs());
+      r.close();
+      return d;
+  }
+  
+
+  /** check that the query weight is serializable. 
+   * @throws IOException if the serialization check fails. 
+   */
+  private static void checkSerialization(Query q, Searcher s) throws IOException {
+    Weight w = s.createNormalizedWeight(q);
+    try {
+      ByteArrayOutputStream bos = new ByteArrayOutputStream();
+      ObjectOutputStream oos = new ObjectOutputStream(bos);
+      oos.writeObject(w);
+      oos.close();
+      ObjectInputStream ois = new ObjectInputStream(new ByteArrayInputStream(bos.toByteArray()));
+      ois.readObject();
+      ois.close();
+      
+      //skip equals() test for now - most weights don't override equals() and we won't add this just for the tests.
+      //TestCase.assertEquals("writeObject(w) != w.  ("+w+")",w2,w);   
+      
+    } catch (Exception e) {
+      IOException e2 = new IOException("Serialization failed for "+w);
+      e2.initCause(e);
+      throw e2;
+    }
+  }
+
+
+  /** alternate scorer skipTo(),skipTo(),next(),next(),skipTo(),skipTo(), etc.,
+   * and ensure a hit collector receives the same docs and scores
+   */
+  public static void checkSkipTo(final Query q, final IndexSearcher s) throws IOException {
+    //System.out.println("Checking "+q);
+    
+    if (s.createNormalizedWeight(q).scoresDocsOutOfOrder()) return;  // in this case order of skipTo() might differ from that of next().
+
+    final int skip_op = 0;
+    final int next_op = 1;
+    final int orders [][] = {
+        {next_op},
+        {skip_op},
+        {skip_op, next_op},
+        {next_op, skip_op},
+        {skip_op, skip_op, next_op, next_op},
+        {next_op, next_op, skip_op, skip_op},
+        {skip_op, skip_op, skip_op, next_op, next_op},
+    };
+    for (int k = 0; k < orders.length; k++) {
+
+        final int order[] = orders[k];
+        // System.out.print("Order:");for (int i = 0; i < order.length; i++)
+        // System.out.print(order[i]==skip_op ? " skip()":" next()");
+        // System.out.println();
+        final int opidx[] = { 0 };
+        final int lastDoc[] = {-1};
+
+        // FUTURE: ensure scorer.doc()==-1
+
+        final float maxDiff = 1e-5f;
+        final IndexReader lastReader[] = {null};
+
+        s.search(q, new Collector() {
+          private Scorer sc;
+          private IndexReader reader;
+          private Scorer scorer;
+
+          @Override
+          public void setScorer(Scorer scorer) throws IOException {
+            this.sc = scorer;
+          }
+
+          @Override
+          public void collect(int doc) throws IOException {
+            float score = sc.score();
+            lastDoc[0] = doc;
+            try {
+              if (scorer == null) {
+                Weight w = s.createNormalizedWeight(q);
+                scorer = w.scorer(reader, true, false);
+              }
+              
+              int op = order[(opidx[0]++) % order.length];
+              // System.out.println(op==skip_op ?
+              // "skip("+(sdoc[0]+1)+")":"next()");
+              boolean more = op == skip_op ? scorer.advance(scorer.docID() + 1) != DocIdSetIterator.NO_MORE_DOCS
+                  : scorer.nextDoc() != DocIdSetIterator.NO_MORE_DOCS;
+              int scorerDoc = scorer.docID();
+              float scorerScore = scorer.score();
+              float scorerScore2 = scorer.score();
+              float scoreDiff = Math.abs(score - scorerScore);
+              float scorerDiff = Math.abs(scorerScore2 - scorerScore);
+              if (!more || doc != scorerDoc || scoreDiff > maxDiff
+                  || scorerDiff > maxDiff) {
+                StringBuilder sbord = new StringBuilder();
+                for (int i = 0; i < order.length; i++)
+                  sbord.append(order[i] == skip_op ? " skip()" : " next()");
+                throw new RuntimeException("ERROR matching docs:" + "\n\t"
+                    + (doc != scorerDoc ? "--> " : "") + "doc=" + doc + ", scorerDoc=" + scorerDoc
+                    + "\n\t" + (!more ? "--> " : "") + "tscorer.more=" + more
+                    + "\n\t" + (scoreDiff > maxDiff ? "--> " : "")
+                    + "scorerScore=" + scorerScore + " scoreDiff=" + scoreDiff
+                    + " maxDiff=" + maxDiff + "\n\t"
+                    + (scorerDiff > maxDiff ? "--> " : "") + "scorerScore2="
+                    + scorerScore2 + " scorerDiff=" + scorerDiff
+                    + "\n\thitCollector.doc=" + doc + " score=" + score
+                    + "\n\t Scorer=" + scorer + "\n\t Query=" + q + "  "
+                    + q.getClass().getName() + "\n\t Searcher=" + s
+                    + "\n\t Order=" + sbord + "\n\t Op="
+                    + (op == skip_op ? " skip()" : " next()"));
+              }
+            } catch (IOException e) {
+              throw new RuntimeException(e);
+            }
+          }
+
+          @Override
+          public void setNextReader(IndexReader reader, int docBase) throws IOException {
+            // confirm that skipping beyond the last doc, on the
+            // previous reader, hits NO_MORE_DOCS
+            if (lastReader[0] != null) {
+              final IndexReader previousReader = lastReader[0];
+              Weight w = new IndexSearcher(previousReader).createNormalizedWeight(q);
+              Scorer scorer = w.scorer(previousReader, true, false);
+              if (scorer != null) {
+                boolean more = scorer.advance(lastDoc[0] + 1) != DocIdSetIterator.NO_MORE_DOCS;
+                Assert.assertFalse("query's last doc was "+ lastDoc[0] +" but skipTo("+(lastDoc[0]+1)+") got to "+scorer.docID(),more);
+              }
+            }
+            this.reader = lastReader[0] = reader;
+            this.scorer = null;
+            lastDoc[0] = -1;
+          }
+
+          @Override
+          public boolean acceptsDocsOutOfOrder() {
+            return true;
+          }
+        });
+
+        if (lastReader[0] != null) {
+          // confirm that skipping beyond the last doc, on the
+          // previous reader, hits NO_MORE_DOCS
+          final IndexReader previousReader = lastReader[0];
+          Weight w = new IndexSearcher(previousReader).createNormalizedWeight(q);
+          Scorer scorer = w.scorer(previousReader, true, false);
+          if (scorer != null) {
+            boolean more = scorer.advance(lastDoc[0] + 1) != DocIdSetIterator.NO_MORE_DOCS;
+            Assert.assertFalse("query's last doc was "+ lastDoc[0] +" but skipTo("+(lastDoc[0]+1)+") got to "+scorer.docID(),more);
+          }
+        }
+      }
+  }
+    
+  // check that first skip on just created scorers always goes to the right doc
+  private static void checkFirstSkipTo(final Query q, final IndexSearcher s) throws IOException {
+    //System.out.println("checkFirstSkipTo: "+q);
+    final float maxDiff = 1e-3f;
+    final int lastDoc[] = {-1};
+    final IndexReader lastReader[] = {null};
+
+    s.search(q,new Collector() {
+      private Scorer scorer;
+      private IndexReader reader;
+      @Override
+      public void setScorer(Scorer scorer) throws IOException {
+        this.scorer = scorer;
+      }
+      @Override
+      public void collect(int doc) throws IOException {
+        //System.out.println("doc="+doc);
+        float score = scorer.score();
+        try {
+          
+          for (int i=lastDoc[0]+1; i<=doc; i++) {
+            Weight w = s.createNormalizedWeight(q);
+            Scorer scorer = w.scorer(reader, true, false);
+            Assert.assertTrue("query collected "+doc+" but skipTo("+i+") says no more docs!",scorer.advance(i) != DocIdSetIterator.NO_MORE_DOCS);
+            Assert.assertEquals("query collected "+doc+" but skipTo("+i+") got to "+scorer.docID(),doc,scorer.docID());
+            float skipToScore = scorer.score();
+            Assert.assertEquals("unstable skipTo("+i+") score!",skipToScore,scorer.score(),maxDiff); 
+            Assert.assertEquals("query assigned doc "+doc+" a score of <"+score+"> but skipTo("+i+") has <"+skipToScore+">!",score,skipToScore,maxDiff);
+          }
+          lastDoc[0] = doc;
+        } catch (IOException e) {
+          throw new RuntimeException(e);
+        }
+      }
+
+      @Override
+      public void setNextReader(IndexReader reader, int docBase) throws IOException {
+        // confirm that skipping beyond the last doc, on the
+        // previous reader, hits NO_MORE_DOCS
+        if (lastReader[0] != null) {
+          final IndexReader previousReader = lastReader[0];
+          Weight w = new IndexSearcher(previousReader).createNormalizedWeight(q);
+          Scorer scorer = w.scorer(previousReader, true, false);
+
+          if (scorer != null) {
+            boolean more = scorer.advance(lastDoc[0] + 1) != DocIdSetIterator.NO_MORE_DOCS;
+            Assert.assertFalse("query's last doc was "+ lastDoc[0] +" but skipTo("+(lastDoc[0]+1)+") got to "+scorer.docID(),more);
+          }
+        }
+
+        this.reader = lastReader[0] = reader;
+        lastDoc[0] = -1;
+      }
+      @Override
+      public boolean acceptsDocsOutOfOrder() {
+        return false;
+      }
+    });
+
+    if (lastReader[0] != null) {
+      // confirm that skipping beyond the last doc, on the
+      // previous reader, hits NO_MORE_DOCS
+      final IndexReader previousReader = lastReader[0];
+      Weight w = new IndexSearcher(previousReader).createNormalizedWeight(q);
+      Scorer scorer = w.scorer(previousReader, true, false);
+      if (scorer != null) {
+        boolean more = scorer.advance(lastDoc[0] + 1) != DocIdSetIterator.NO_MORE_DOCS;
+        Assert.assertFalse("query's last doc was "+ lastDoc[0] +" but skipTo("+(lastDoc[0]+1)+") got to "+scorer.docID(),more);
+      }
+    }
+  }
+}
diff --git a/lucene/backwards/src/test-framework/org/apache/lucene/store/MockDirectoryWrapper.java b/lucene/backwards/src/test-framework/org/apache/lucene/store/MockDirectoryWrapper.java
new file mode 100644
index 0000000..4149153
--- /dev/null
+++ b/lucene/backwards/src/test-framework/org/apache/lucene/store/MockDirectoryWrapper.java
@@ -0,0 +1,651 @@
+package org.apache.lucene.store;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.Closeable;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.IdentityHashMap;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Random;
+import java.util.Set;
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.ThrottledIndexOutput;
+import org.apache.lucene.util._TestUtil;
+
+/**
+ * This is a Directory Wrapper that adds methods
+ * intended to be used only by unit tests.
+ * It also adds a number of features useful for testing:
+ * <ul>
+ *   <li> Instances created by {@link LuceneTestCase#newDirectory()} are tracked 
+ *        to ensure they are closed by the test.
+ *   <li> When a MockDirectoryWrapper is closed, it will throw an exception if 
+ *        it has any open files against it (with a stacktrace indicating where 
+ *        they were opened from).
+ *   <li> When a MockDirectoryWrapper is closed, it runs CheckIndex to test if
+ *        the index was corrupted.
+ *   <li> MockDirectoryWrapper simulates some "features" of Windows, such as
+ *        refusing to write to or delete open files.
+ * </ul>
+ */
+
+public class MockDirectoryWrapper extends Directory {
+  final Directory delegate;
+  long maxSize;
+
+  // Max actual bytes used. This is set by MockRAMOutputStream:
+  long maxUsedSize;
+  double randomIOExceptionRate;
+  Random randomState;
+  boolean noDeleteOpenFile = true;
+  boolean preventDoubleWrite = true;
+  boolean checkIndexOnClose = true;
+  boolean trackDiskUsage = false;
+  private Set<String> unSyncedFiles;
+  private Set<String> createdFiles;
+  private Set<String> openFilesForWrite = new HashSet<String>();
+  Set<String> openLocks = Collections.synchronizedSet(new HashSet<String>());
+  volatile boolean crashed;
+  private ThrottledIndexOutput throttledOutput;
+  private Throttling throttling = Throttling.SOMETIMES;
+
+  // use this for tracking files for crash.
+  // additionally: provides debugging information in case you leave one open
+  private Map<Closeable,Exception> openFileHandles = Collections.synchronizedMap(new IdentityHashMap<Closeable,Exception>());
+
+  // NOTE: we cannot initialize the Map here due to the
+  // order in which our constructor actually does this
+  // member initialization vs when it calls super.  It seems
+  // like super is called, then our members are initialized:
+  private Map<String,Integer> openFiles;
+
+  // Only tracked if noDeleteOpenFile is true: if an attempt
+  // is made to delete an open file, we enroll it here.
+  private Set<String> openFilesDeleted;
+
+  private synchronized void init() {
+    if (openFiles == null) {
+      openFiles = new HashMap<String,Integer>();
+      openFilesDeleted = new HashSet<String>();
+    }
+
+    if (createdFiles == null)
+      createdFiles = new HashSet<String>();
+    if (unSyncedFiles == null)
+      unSyncedFiles = new HashSet<String>();
+  }
+
+  public MockDirectoryWrapper(Random random, Directory delegate) {
+    this.delegate = delegate;
+    // must make a private random since our methods are
+    // called from different threads; else test failures may
+    // not be reproducible from the original seed
+    this.randomState = new Random(random.nextInt());
+    this.throttledOutput = new ThrottledIndexOutput(ThrottledIndexOutput
+        .mBitsToBytes(40 + randomState.nextInt(10)), 5 + randomState.nextInt(5), null);
+    // force wrapping of lockfactory
+    try {
+      setLockFactory(new MockLockFactoryWrapper(this, delegate.getLockFactory()));
+    } catch (IOException e) {
+      throw new RuntimeException(e);
+    }
+    init();
+  }
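+  // A minimal usage sketch (illustrative only, not part of the test framework itself),
+  // assuming a test has a Random available and wraps a plain RAMDirectory:
+  //
+  //   MockDirectoryWrapper dir = new MockDirectoryWrapper(new Random(42), new RAMDirectory());
+  //   dir.setCheckIndexOnClose(true);       // run CheckIndex when the directory is closed
+  //   dir.setRandomIOExceptionRate(0.01);   // ~1% chance of an IOException on a first write
+  //   IndexOutput out = dir.createOutput("demo.bin");
+  //   out.writeByte((byte) 0);
+  //   out.close();
+  //   dir.close();                          // throws if any files or locks are still open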
+
+  public void setTrackDiskUsage(boolean v) {
+    trackDiskUsage = v;
+  }
+
+  /** If set to true, we throw an IOException if the same
+   *  file is opened by createOutput, ever. */
+  public void setPreventDoubleWrite(boolean value) {
+    preventDoubleWrite = value;
+  }
+
+  @Deprecated
+  @Override
+  public void sync(String name) throws IOException {
+    maybeYield();
+    maybeThrowDeterministicException();
+    if (crashed)
+      throw new IOException("cannot sync after crash");
+    unSyncedFiles.remove(name);
+    delegate.sync(name);
+  }
+  
+  public static enum Throttling {
+    /** always emulate a slow hard disk. could be very slow! */
+    ALWAYS,
+    /** sometimes (2% of the time) emulate a slow hard disk. */
+    SOMETIMES,
+    /** never throttle output */
+    NEVER
+  }
+  
+  public void setThrottling(Throttling throttling) {
+    this.throttling = throttling;
+  }
+
+  @Override
+  public synchronized void sync(Collection<String> names) throws IOException {
+    maybeYield();
+    for (String name : names)
+      maybeThrowDeterministicException();
+    if (crashed)
+      throw new IOException("cannot sync after crash");
+    unSyncedFiles.removeAll(names);
+    delegate.sync(names);
+  }
+  
+  @Override
+  public String toString() {
+    // NOTE: do not maybeYield here, since it consumes
+    // randomness and can thus (unexpectedly during
+    // debugging) change the behavior of a seed
+    // maybeYield();
+    return "MockDirWrapper(" + delegate + ")";
+  }
+
+  public synchronized final long sizeInBytes() throws IOException {
+    if (delegate instanceof RAMDirectory)
+      return ((RAMDirectory) delegate).sizeInBytes();
+    else {
+      // hack
+      long size = 0;
+      for (String file : delegate.listAll())
+        size += delegate.fileLength(file);
+      return size;
+    }
+  }
+
+  /** Simulates a crash of OS or machine by overwriting
+   *  unsynced files. */
+  public synchronized void crash() throws IOException {
+    crashed = true;
+    openFiles = new HashMap<String,Integer>();
+    openFilesForWrite = new HashSet<String>();
+    openFilesDeleted = new HashSet<String>();
+    Iterator<String> it = unSyncedFiles.iterator();
+    unSyncedFiles = new HashSet<String>();
+    // first force-close all files, so we can corrupt on windows etc.
+    // clone the file map, as these guys want to remove themselves on close.
+    Map<Closeable,Exception> m = new IdentityHashMap<Closeable,Exception>(openFileHandles);
+    for (Closeable f : m.keySet())
+      try {
+        f.close();
+      } catch (Exception ignored) {}
+    
+    int count = 0;
+    while(it.hasNext()) {
+      String name = it.next();
+      if (count % 3 == 0) {
+        deleteFile(name, true);
+      } else if (count % 3 == 1) {
+        // Zero out file entirely
+        long length = fileLength(name);
+        byte[] zeroes = new byte[256];
+        long upto = 0;
+        IndexOutput out = delegate.createOutput(name);
+        while(upto < length) {
+          final int limit = (int) Math.min(length-upto, zeroes.length);
+          out.writeBytes(zeroes, 0, limit);
+          upto += limit;
+        }
+        out.close();
+      } else if (count % 3 == 2) {
+        // Truncate the file:
+        IndexOutput out = delegate.createOutput(name);
+        out.setLength(fileLength(name)/2);
+        out.close();
+      }
+      count++;
+    }
+  }
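+  // After crash(), the wrapper refuses sync(), createOutput() and deleteFile() until
+  // clearCrash() below is called (see the "cannot ... after crash" checks in those methods).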
+
+  public synchronized void clearCrash() throws IOException {
+    crashed = false;
+    openLocks.clear();
+  }
+
+  public void setMaxSizeInBytes(long maxSize) {
+    this.maxSize = maxSize;
+  }
+  public long getMaxSizeInBytes() {
+    return this.maxSize;
+  }
+
+  /**
+   * Returns the peak actual storage used (bytes) in this
+   * directory.
+   */
+  public long getMaxUsedSizeInBytes() {
+    return this.maxUsedSize;
+  }
+  public void resetMaxUsedSizeInBytes() throws IOException {
+    this.maxUsedSize = getRecomputedActualSizeInBytes();
+  }
+
+  /**
+   * Emulate Windows, where deleting an open file is not
+   * allowed (raises an IOException).
+   */
+  public void setNoDeleteOpenFile(boolean value) {
+    this.noDeleteOpenFile = value;
+  }
+  public boolean getNoDeleteOpenFile() {
+    return noDeleteOpenFile;
+  }
+
+  /**
+   * Set whether or not CheckIndex should be run
+   * on close.
+   */
+  public void setCheckIndexOnClose(boolean value) {
+    this.checkIndexOnClose = value;
+  }
+  
+  public boolean getCheckIndexOnClose() {
+    return checkIndexOnClose;
+  }
+  /**
+   * If 0.0, no exceptions will be thrown.  Otherwise this
+   * should be a double between 0.0 and 1.0.  We will randomly
+   * throw an IOException on the first write to an OutputStream
+   * based on this probability.
+   */
+  public void setRandomIOExceptionRate(double rate) {
+    randomIOExceptionRate = rate;
+  }
+  public double getRandomIOExceptionRate() {
+    return randomIOExceptionRate;
+  }
+
+  void maybeThrowIOException() throws IOException {
+    if (randomIOExceptionRate > 0.0) {
+      int number = Math.abs(randomState.nextInt() % 1000);
+      if (number < randomIOExceptionRate*1000) {
+        if (LuceneTestCase.VERBOSE) {
+          System.out.println(Thread.currentThread().getName() + ": MockDirectoryWrapper: now throw random exception");
+          new Throwable().printStackTrace(System.out);
+        }
+        throw new IOException("a random IOException");
+      }
+    }
+  }
+
+  @Override
+  public synchronized void deleteFile(String name) throws IOException {
+    maybeYield();
+    deleteFile(name, false);
+  }
+
+  // sets the cause of the incoming ioe to be the stack
+  // trace when the offending file name was opened
+  private synchronized IOException fillOpenTrace(IOException ioe, String name, boolean input) {
+    for(Map.Entry<Closeable,Exception> ent : openFileHandles.entrySet()) {
+      if (input && ent.getKey() instanceof MockIndexInputWrapper && ((MockIndexInputWrapper) ent.getKey()).name.equals(name)) {
+        ioe.initCause(ent.getValue());
+        break;
+      } else if (!input && ent.getKey() instanceof MockIndexOutputWrapper && ((MockIndexOutputWrapper) ent.getKey()).name.equals(name)) {
+        ioe.initCause(ent.getValue());
+        break;
+      }
+    }
+    return ioe;
+  }
+
+  private void maybeYield() {
+    if (randomState.nextBoolean()) {
+      Thread.yield();
+    }
+  }
+
+  private synchronized void deleteFile(String name, boolean forced) throws IOException {
+    maybeYield();
+
+    maybeThrowDeterministicException();
+
+    if (crashed && !forced)
+      throw new IOException("cannot delete after crash");
+
+    if (unSyncedFiles.contains(name))
+      unSyncedFiles.remove(name);
+    if (!forced && noDeleteOpenFile) {
+      if (openFiles.containsKey(name)) {
+        openFilesDeleted.add(name);
+        throw fillOpenTrace(new IOException("MockDirectoryWrapper: file \"" + name + "\" is still open: cannot delete"), name, true);
+      } else {
+        openFilesDeleted.remove(name);
+      }
+    }
+    delegate.deleteFile(name);
+  }
+
+  public synchronized Set<String> getOpenDeletedFiles() {
+    return new HashSet<String>(openFilesDeleted);
+  }
+
+  @Override
+  public synchronized IndexOutput createOutput(String name) throws IOException {
+    maybeYield();
+    if (crashed)
+      throw new IOException("cannot createOutput after crash");
+    init();
+    synchronized(this) {
+      if (preventDoubleWrite && createdFiles.contains(name) && !name.equals("segments.gen"))
+        throw new IOException("file \"" + name + "\" was already written to");
+    }
+    if (noDeleteOpenFile && openFiles.containsKey(name))
+      throw new IOException("MockDirectoryWrapper: file \"" + name + "\" is still open: cannot overwrite");
+    
+    if (crashed)
+      throw new IOException("cannot createOutput after crash");
+    unSyncedFiles.add(name);
+    createdFiles.add(name);
+    
+    if (delegate instanceof RAMDirectory) {
+      RAMDirectory ramdir = (RAMDirectory) delegate;
+      RAMFile file = new RAMFile(ramdir);
+      RAMFile existing = ramdir.fileMap.get(name);
+    
+      // Enforce write once:
+      if (existing!=null && !name.equals("segments.gen") && preventDoubleWrite)
+        throw new IOException("file " + name + " already exists");
+      else {
+        if (existing!=null) {
+          ramdir.sizeInBytes.getAndAdd(-existing.sizeInBytes);
+          existing.directory = null;
+        }
+        ramdir.fileMap.put(name, file);
+      }
+    }
+    
+    //System.out.println(Thread.currentThread().getName() + ": MDW: create " + name);
+    IndexOutput io = new MockIndexOutputWrapper(this, delegate.createOutput(name), name);
+    addFileHandle(io, name, false);
+    openFilesForWrite.add(name);
+    
+    // throttling REALLY slows down tests, so don't do it very often for SOMETIMES.
+    if (throttling == Throttling.ALWAYS || 
+        (throttling == Throttling.SOMETIMES && randomState.nextInt(50) == 0)) {
+      if (LuceneTestCase.VERBOSE) {
+        System.out.println("MockDirectoryWrapper: throttling indexOutput");
+      }
+      return throttledOutput.newFromDelegate(io);
+    } else {
+      return io;
+    }
+  }
+
+  synchronized void addFileHandle(Closeable c, String name, boolean input) {
+    Integer v = openFiles.get(name);
+    if (v != null) {
+      v = Integer.valueOf(v.intValue()+1);
+      openFiles.put(name, v);
+    } else {
+      openFiles.put(name, Integer.valueOf(1));
+    }
+    
+    openFileHandles.put(c, new RuntimeException("unclosed Index" + (input ? "Input" : "Output") + ": " + name));
+  }
+  
+  @Override
+  public synchronized IndexInput openInput(String name) throws IOException {
+    maybeYield();
+    if (!delegate.fileExists(name))
+      throw new FileNotFoundException(name);
+
+    // cannot open a file for input if it's still open for
+    // output, except for segments.gen and segments_N
+    if (openFilesForWrite.contains(name) && !name.startsWith("segments")) {
+      throw fillOpenTrace(new IOException("MockDirectoryWrapper: file \"" + name + "\" is still open for writing"), name, false);
+    }
+
+    IndexInput ii = new MockIndexInputWrapper(this, name, delegate.openInput(name));
+    addFileHandle(ii, name, true);
+    return ii;
+  }
+
+  /** Provided for testing purposes.  Use sizeInBytes() instead. */
+  public synchronized final long getRecomputedSizeInBytes() throws IOException {
+    if (!(delegate instanceof RAMDirectory))
+      return sizeInBytes();
+    long size = 0;
+    for(final RAMFile file: ((RAMDirectory)delegate).fileMap.values()) {
+      size += file.getSizeInBytes();
+    }
+    return size;
+  }
+
+  /** Like getRecomputedSizeInBytes(), but, uses actual file
+   * lengths rather than buffer allocations (which are
+   * quantized up to nearest
+   * RAMOutputStream.BUFFER_SIZE (now 1024) bytes.
+   */
+
+  public final synchronized long getRecomputedActualSizeInBytes() throws IOException {
+    if (!(delegate instanceof RAMDirectory))
+      return sizeInBytes();
+    long size = 0;
+    for (final RAMFile file : ((RAMDirectory)delegate).fileMap.values())
+      size += file.length;
+    return size;
+  }
+
+  @Override
+  public synchronized void close() throws IOException {
+    maybeYield();
+    if (openFiles == null) {
+      openFiles = new HashMap<String,Integer>();
+      openFilesDeleted = new HashSet<String>();
+    }
+    if (noDeleteOpenFile && openFiles.size() > 0) {
+      // print the first one as it's very verbose otherwise
+      Exception cause = null;
+      Iterator<Exception> stacktraces = openFileHandles.values().iterator();
+      if (stacktraces.hasNext())
+        cause = stacktraces.next();
+      // RuntimeException instead of IOException because
+      // super() does not throw IOException currently:
+      throw new RuntimeException("MockDirectoryWrapper: cannot close: there are still open files: " + openFiles, cause);
+    }
+    if (noDeleteOpenFile && openLocks.size() > 0) {
+      throw new RuntimeException("MockDirectoryWrapper: cannot close: there are still open locks: " + openLocks);
+    }
+    open = false;
+    if (checkIndexOnClose && IndexReader.indexExists(this)) {
+      if (LuceneTestCase.VERBOSE) {
+        System.out.println("\nNOTE: MockDirectoryWrapper: now run CheckIndex");
+      } 
+      _TestUtil.checkIndex(this);
+    }
+    delegate.close();
+  }
+
+  synchronized void removeOpenFile(Closeable c, String name) {
+    Integer v = openFiles.get(name);
+    // Could be null when crash() was called
+    if (v != null) {
+      if (v.intValue() == 1) {
+        openFiles.remove(name);
+        openFilesDeleted.remove(name);
+      } else {
+        v = Integer.valueOf(v.intValue()-1);
+        openFiles.put(name, v);
+      }
+    }
+
+    openFileHandles.remove(c);
+  }
+  
+  public synchronized void removeIndexOutput(IndexOutput out, String name) {
+    openFilesForWrite.remove(name);
+    removeOpenFile(out, name);
+  }
+  
+  public synchronized void removeIndexInput(IndexInput in, String name) {
+    removeOpenFile(in, name);
+  }
+  
+  boolean open = true;
+  
+  public synchronized boolean isOpen() {
+    return open;
+  }
+  
+  /**
+   * Objects that represent fail-able conditions. Objects of a derived
+   * class are created and registered with the mock directory. After
+   * register, each object will be invoked once for each first write
+   * of a file, giving the object a chance to throw an IOException.
+   */
+  public static class Failure {
+    /**
+     * eval is called on the first write of every new file.
+     */
+    public void eval(MockDirectoryWrapper dir) throws IOException { }
+
+    /**
+     * reset should set the state of the failure to its default
+     * (freshly constructed) state. Reset is convenient for tests
+     * that want to create one failure object and then reuse it in
+     * multiple cases. This, combined with the fact that Failure
+     * subclasses are often anonymous classes makes reset difficult to
+     * do otherwise.
+     *
+     * A typical example of use is
+     * Failure failure = new Failure() { ... };
+     * ...
+     * mock.failOn(failure.reset())
+     */
+    public Failure reset() { return this; }
+
+    protected boolean doFail;
+
+    public void setDoFail() {
+      doFail = true;
+    }
+
+    public void clearDoFail() {
+      doFail = false;
+    }
+  }
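+  // A minimal sketch (illustrative) of how a Failure is typically wired up with failOn()
+  // below: an anonymous subclass that throws once after setDoFail() has been called.
+  //
+  //   MockDirectoryWrapper.Failure failOnce = new MockDirectoryWrapper.Failure() {
+  //     @Override
+  //     public void eval(MockDirectoryWrapper dir) throws IOException {
+  //       if (doFail) {
+  //         doFail = false;
+  //         throw new IOException("simulated failure");
+  //       }
+  //     }
+  //   };
+  //   dir.failOn(failOnce.reset());
+  //   failOnce.setDoFail();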
+
+  ArrayList<Failure> failures;
+
+  /**
+   * add a Failure object to the list of objects to be evaluated
+   * at every potential failure point
+   */
+  synchronized public void failOn(Failure fail) {
+    if (failures == null) {
+      failures = new ArrayList<Failure>();
+    }
+    failures.add(fail);
+  }
+
+  /**
+   * Iterate through the failures list, giving each object a
+   * chance to throw an IOE
+   */
+  synchronized void maybeThrowDeterministicException() throws IOException {
+    if (failures != null) {
+      for(int i = 0; i < failures.size(); i++) {
+        failures.get(i).eval(this);
+      }
+    }
+  }
+
+  @Override
+  public synchronized String[] listAll() throws IOException {
+    maybeYield();
+    return delegate.listAll();
+  }
+
+  @Override
+  public synchronized boolean fileExists(String name) throws IOException {
+    maybeYield();
+    return delegate.fileExists(name);
+  }
+
+  @Override
+  public synchronized long fileModified(String name) throws IOException {
+    maybeYield();
+    return delegate.fileModified(name);
+  }
+
+  @Override
+  @Deprecated
+  /*  @deprecated Lucene never uses this API; it will be
+   *  removed in 4.0. */
+  public synchronized void touchFile(String name) throws IOException {
+    maybeYield();
+    delegate.touchFile(name);
+  }
+
+  @Override
+  public synchronized long fileLength(String name) throws IOException {
+    maybeYield();
+    return delegate.fileLength(name);
+  }
+
+  @Override
+  public synchronized Lock makeLock(String name) {
+    maybeYield();
+    return delegate.makeLock(name);
+  }
+
+  @Override
+  public synchronized void clearLock(String name) throws IOException {
+    maybeYield();
+    delegate.clearLock(name);
+  }
+
+  @Override
+  public synchronized void setLockFactory(LockFactory lockFactory) throws IOException {
+    maybeYield();
+    delegate.setLockFactory(lockFactory);
+  }
+
+  @Override
+  public synchronized LockFactory getLockFactory() {
+    maybeYield();
+    return delegate.getLockFactory();
+  }
+
+  @Override
+  public synchronized String getLockID() {
+    maybeYield();
+    return delegate.getLockID();
+  }
+
+  @Override
+  public synchronized void copy(Directory to, String src, String dest) throws IOException {
+    maybeYield();
+    delegate.copy(to, src, dest);
+  }
+}
diff --git a/lucene/backwards/src/test-framework/org/apache/lucene/store/MockIndexInputWrapper.java b/lucene/backwards/src/test-framework/org/apache/lucene/store/MockIndexInputWrapper.java
new file mode 100644
index 0000000..32d8e6f
--- /dev/null
+++ b/lucene/backwards/src/test-framework/org/apache/lucene/store/MockIndexInputWrapper.java
@@ -0,0 +1,161 @@
+package org.apache.lucene.store;
+
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Used by MockDirectoryWrapper to create an input stream that
+ * keeps track of when it's been closed.
+ */
+
+public class MockIndexInputWrapper extends IndexInput {
+  private MockDirectoryWrapper dir;
+  final String name;
+  private IndexInput delegate;
+  private boolean isClone;
+
+  /** Wraps the given delegate input so the directory can track when it is closed. */
+  public MockIndexInputWrapper(MockDirectoryWrapper dir, String name, IndexInput delegate) {
+    this.name = name;
+    this.dir = dir;
+    this.delegate = delegate;
+  }
+
+  @Override
+  public void close() throws IOException {
+    try {
+      // turn on the following to look for leaks closing inputs,
+      // after fixing TestTransactions
+      // dir.maybeThrowDeterministicException();
+    } finally {
+      delegate.close();
+      // Pending resolution on LUCENE-686 we may want to
+      // remove the conditional check so we also track that
+      // all clones get closed:
+      if (!isClone) {
+        dir.removeIndexInput(this, name);
+      }
+    }
+  }
+
+  @Override
+  public Object clone() {
+    IndexInput iiclone = (IndexInput) delegate.clone();
+    MockIndexInputWrapper clone = new MockIndexInputWrapper(dir, name, iiclone);
+    clone.isClone = true;
+    // Pending resolution on LUCENE-686 we may want to
+    // uncomment this code so that we also track that all
+    // clones get closed:
+    /*
+    synchronized(dir.openFiles) {
+      if (dir.openFiles.containsKey(name)) {
+        Integer v = (Integer) dir.openFiles.get(name);
+        v = Integer.valueOf(v.intValue()+1);
+        dir.openFiles.put(name, v);
+      } else {
+        throw new RuntimeException("BUG: cloned file was not open?");
+      }
+    }
+    */
+    return clone;
+  }
+
+  @Override
+  public long getFilePointer() {
+    return delegate.getFilePointer();
+  }
+
+  @Override
+  public void seek(long pos) throws IOException {
+    delegate.seek(pos);
+  }
+
+  @Override
+  public long length() {
+    return delegate.length();
+  }
+
+  @Override
+  public byte readByte() throws IOException {
+    return delegate.readByte();
+  }
+
+  @Override
+  public void readBytes(byte[] b, int offset, int len) throws IOException {
+    delegate.readBytes(b, offset, len);
+  }
+
+  @Override
+  public void copyBytes(IndexOutput out, long numBytes) throws IOException {
+    delegate.copyBytes(out, numBytes);
+  }
+
+  @Override
+  public void readBytes(byte[] b, int offset, int len, boolean useBuffer)
+      throws IOException {
+    delegate.readBytes(b, offset, len, useBuffer);
+  }
+
+  @Override
+  public int readInt() throws IOException {
+    return delegate.readInt();
+  }
+
+  @Override
+  public int readVInt() throws IOException {
+    return delegate.readVInt();
+  }
+
+  @Override
+  public long readLong() throws IOException {
+    return delegate.readLong();
+  }
+
+  @Override
+  public long readVLong() throws IOException {
+    return delegate.readVLong();
+  }
+
+  @Override
+  public String readString() throws IOException {
+    return delegate.readString();
+  }
+
+  @Override
+  public Map<String,String> readStringStringMap() throws IOException {
+    return delegate.readStringStringMap();
+  }
+
+  @Override
+  public void setModifiedUTF8StringsMode() {
+    delegate.setModifiedUTF8StringsMode();
+  }
+
+  @Override
+  public void readChars(char[] buffer, int start, int length)
+      throws IOException {
+    delegate.readChars(buffer, start, length);
+  }
+
+  @Override
+  public void skipChars(int length) throws IOException {
+    delegate.skipChars(length);
+  }
+}
diff --git a/lucene/backwards/src/test-framework/org/apache/lucene/store/MockIndexOutputWrapper.java b/lucene/backwards/src/test-framework/org/apache/lucene/store/MockIndexOutputWrapper.java
new file mode 100644
index 0000000..0f93567
--- /dev/null
+++ b/lucene/backwards/src/test-framework/org/apache/lucene/store/MockIndexOutputWrapper.java
@@ -0,0 +1,159 @@
+package org.apache.lucene.store;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.util.LuceneTestCase;
+
+/**
+ * Used by MockDirectoryWrapper to create an output stream that
+ * will throw an IOException on fake disk full, track max
+ * disk space actually used, and maybe throw random
+ * IOExceptions.
+ */
+
+public class MockIndexOutputWrapper extends IndexOutput {
+  private MockDirectoryWrapper dir;
+  private final IndexOutput delegate;
+  private boolean first=true;
+  final String name;
+  
+  byte[] singleByte = new byte[1];
+
+  /** Wraps the given delegate output so the directory can track usage and inject failures. */
+  public MockIndexOutputWrapper(MockDirectoryWrapper dir, IndexOutput delegate, String name) {
+    this.dir = dir;
+    this.name = name;
+    this.delegate = delegate;
+  }
+
+  @Override
+  public void close() throws IOException {
+    try {
+      dir.maybeThrowDeterministicException();
+    } finally {
+      delegate.close();
+      if (dir.trackDiskUsage) {
+        // Now compute actual disk usage & track the maxUsedSize
+        // in the MockDirectoryWrapper:
+        long size = dir.getRecomputedActualSizeInBytes();
+        if (size > dir.maxUsedSize) {
+          dir.maxUsedSize = size;
+        }
+      }
+      dir.removeIndexOutput(this, name);
+    }
+  }
+
+  @Override
+  public void flush() throws IOException {
+    dir.maybeThrowDeterministicException();
+    delegate.flush();
+  }
+
+  @Override
+  public void writeByte(byte b) throws IOException {
+    singleByte[0] = b;
+    writeBytes(singleByte, 0, 1);
+  }
+  
+  @Override
+  public void writeBytes(byte[] b, int offset, int len) throws IOException {
+    long freeSpace = dir.maxSize == 0 ? 0 : dir.maxSize - dir.sizeInBytes();
+    long realUsage = 0;
+
+    // If the MockDirectoryWrapper crashed since we were opened, then
+    // don't write anything:
+    if (dir.crashed)
+      throw new IOException("MockRAMDirectory was crashed; cannot write to " + name);
+
+    // Enforce disk full:
+    if (dir.maxSize != 0 && freeSpace <= len) {
+      // Compute the real disk free.  This will greatly slow
+      // down our test but makes it more accurate:
+      realUsage = dir.getRecomputedActualSizeInBytes();
+      freeSpace = dir.maxSize - realUsage;
+    }
+
+    if (dir.maxSize != 0 && freeSpace <= len) {
+      if (freeSpace > 0) {
+        realUsage += freeSpace;
+        delegate.writeBytes(b, offset, (int) freeSpace);
+      }
+      if (realUsage > dir.maxUsedSize) {
+        dir.maxUsedSize = realUsage;
+      }
+      String message = "fake disk full at " + dir.getRecomputedActualSizeInBytes() + " bytes when writing " + name + " (file length=" + delegate.length();
+      if (freeSpace > 0) {
+        message += "; wrote " + freeSpace + " of " + len + " bytes";
+      }
+      message += ")";
+      if (LuceneTestCase.VERBOSE) {
+        System.out.println(Thread.currentThread().getName() + ": MDW: now throw fake disk full");
+        new Throwable().printStackTrace(System.out);
+      }
+      throw new IOException(message);
+    } else {
+      if (dir.randomState.nextInt(200) == 0) {
+        final int half = len/2;
+        delegate.writeBytes(b, offset, half);
+        Thread.yield();
+        delegate.writeBytes(b, offset+half, len-half);
+      } else {
+        delegate.writeBytes(b, offset, len);
+      }
+    }
+
+    dir.maybeThrowDeterministicException();
+
+    if (first) {
+      // Maybe throw random exception; only do this on first
+      // write to a new file:
+      first = false;
+      dir.maybeThrowIOException();
+    }
+  }
+
+  @Override
+  public long getFilePointer() {
+    return delegate.getFilePointer();
+  }
+
+  @Override
+  public void seek(long pos) throws IOException {
+    delegate.seek(pos);
+  }
+
+  @Override
+  public long length() throws IOException {
+    return delegate.length();
+  }
+
+  @Override
+  public void setLength(long length) throws IOException {
+    delegate.setLength(length);
+  }
+
+  @Override
+  public void copyBytes(DataInput input, long numBytes) throws IOException {
+    delegate.copyBytes(input, numBytes);
+    // TODO: we may need to check disk full here as well
+    dir.maybeThrowDeterministicException();
+  }
+}
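+// A minimal sketch (illustrative) of how the fake-disk-full path in writeBytes() is usually
+// exercised from a test, assuming "dir" is a MockDirectoryWrapper with a tiny size budget:
+//
+//   dir.setMaxSizeInBytes(10);
+//   IndexOutput out = dir.createOutput("big.bin");
+//   try {
+//     out.writeBytes(new byte[64], 0, 64);   // exceeds the budget -> "fake disk full ..." IOException
+//   } catch (IOException expected) {
+//     // expected
+//   } finally {
+//     out.close();
+//   }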
diff --git a/lucene/backwards/src/test-framework/org/apache/lucene/store/MockLockFactoryWrapper.java b/lucene/backwards/src/test-framework/org/apache/lucene/store/MockLockFactoryWrapper.java
new file mode 100644
index 0000000..b51889f
--- /dev/null
+++ b/lucene/backwards/src/test-framework/org/apache/lucene/store/MockLockFactoryWrapper.java
@@ -0,0 +1,87 @@
+package org.apache.lucene.store;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+public class MockLockFactoryWrapper extends LockFactory {
+  MockDirectoryWrapper dir;
+  LockFactory delegate;
+  
+  public MockLockFactoryWrapper(MockDirectoryWrapper dir, LockFactory delegate) {
+    this.dir = dir;
+    this.delegate = delegate;
+  }
+  
+  @Override
+  public void setLockPrefix(String lockPrefix) {
+    delegate.setLockPrefix(lockPrefix);
+  }
+
+  @Override
+  public String getLockPrefix() {
+    return delegate.getLockPrefix();
+  }
+
+  @Override
+  public Lock makeLock(String lockName) {
+    return new MockLock(delegate.makeLock(lockName), lockName);
+  }
+
+  @Override
+  public void clearLock(String lockName) throws IOException {
+    delegate.clearLock(lockName);
+    dir.openLocks.remove(lockName);
+  }
+  
+  @Override
+  public String toString() {
+    return "MockLockFactoryWrapper(" + delegate.toString() + ")";
+  }
+
+  private class MockLock extends Lock {
+    private Lock delegateLock;
+    private String name;
+    
+    MockLock(Lock delegate, String name) {
+      this.delegateLock = delegate;
+      this.name = name;
+    }
+
+    @Override
+    public boolean obtain() throws IOException {
+      if (delegateLock.obtain()) {
+        dir.openLocks.add(name);
+        return true;
+      } else {
+        return false;
+      }
+    }
+
+    @Override
+    public void release() throws IOException {
+      delegateLock.release();
+      dir.openLocks.remove(name);
+    }
+
+    @Override
+    public boolean isLocked() throws IOException {
+      return delegateLock.isLocked();
+    }
+  }
+}
diff --git a/lucene/backwards/src/test-framework/org/apache/lucene/store/_TestHelper.java b/lucene/backwards/src/test-framework/org/apache/lucene/store/_TestHelper.java
new file mode 100644
index 0000000..fb90a87
--- /dev/null
+++ b/lucene/backwards/src/test-framework/org/apache/lucene/store/_TestHelper.java
@@ -0,0 +1,65 @@
+package org.apache.lucene.store;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.store.SimpleFSDirectory.SimpleFSIndexInput;
+
+/** This class provides access to package-level features defined in the
+ *  store package. It is used for testing only.
+ */
+public class _TestHelper {
+
+    /** Returns true if the provided input stream is actually
+     *  a SimpleFSIndexInput.
+     */
+    public static boolean isSimpleFSIndexInput(IndexInput is) {
+        return is instanceof SimpleFSIndexInput;
+    }
+
+    /** Returns true if the provided input stream is a SimpleFSIndexInput and
+     *  is a clone, that is, it does not own its underlying file descriptor.
+     */
+    public static boolean isSimpleFSIndexInputClone(IndexInput is) {
+        if (isSimpleFSIndexInput(is)) {
+            return ((SimpleFSIndexInput) is).isClone;
+        } else {
+            return false;
+        }
+    }
+
+    /** Given an instance of SimpleFSDirectory.SimpleFSIndexInput, this method returns
+     *  true if the underlying file descriptor is valid, and false otherwise.
+     *  This can be used to determine if the OS file has been closed.
+     *  The descriptor becomes invalid when the non-clone instance of the
+     *  SimpleFSIndexInput that owns this descriptor is closed. However, the
+     *  descriptor may possibly become invalid in other ways as well.
+     */
+    public static boolean isSimpleFSIndexInputOpen(IndexInput is)
+    throws IOException
+    {
+        if (isSimpleFSIndexInput(is)) {
+            SimpleFSIndexInput fis = (SimpleFSIndexInput) is;
+            return fis.isFDValid();
+        } else {
+            return false;
+        }
+    }
+
+}
diff --git a/lucene/backwards/src/test-framework/org/apache/lucene/util/LineFileDocs.java b/lucene/backwards/src/test-framework/org/apache/lucene/util/LineFileDocs.java
new file mode 100644
index 0000000..a4cd41f
--- /dev/null
+++ b/lucene/backwards/src/test-framework/org/apache/lucene/util/LineFileDocs.java
@@ -0,0 +1,178 @@
+package org.apache.lucene.util;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.Closeable;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.io.BufferedReader;
+import java.io.InputStreamReader;
+import java.io.InputStream;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.zip.GZIPInputStream;
+import java.util.Random;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+
+/** Minimal port of contrib/benchmark's LineDocSource +
+ * DocMaker, so tests can enumerate docs from a line file created
+ * by contrib/benchmark's WriteLineDocTask */
+public class LineFileDocs implements Closeable {
+
+  private BufferedReader reader;
+  private final static int BUFFER_SIZE = 1 << 16;     // 64K
+  private final AtomicInteger id = new AtomicInteger();
+  private final String path;
+
+  /** The file is rewound at EOF, so the docs repeat
+   * over and over. */
+  public LineFileDocs(Random random, String path) throws IOException {
+    this.path = path;
+    open(random);
+  }
+
+  public LineFileDocs(Random random) throws IOException {
+    this(random, LuceneTestCase.TEST_LINE_DOCS_FILE);
+  }
+
+  public synchronized void close() throws IOException {
+    if (reader != null) {
+      reader.close();
+      reader = null;
+    }
+  }
+
+  private synchronized void open(Random random) throws IOException {
+    InputStream is = getClass().getResourceAsStream(path);
+    if (is == null) {
+      // if it's not in the classpath, we load it as an absolute filesystem path (e.g. Hudson's home dir)
+      is = new FileInputStream(path);
+    }
+    File file = new File(path);
+    long size;
+    if (file.exists()) {
+      size = file.length();
+    } else {
+      size = is.available();
+    }
+    if (path.endsWith(".gz")) {
+      is = new GZIPInputStream(is);
+      // guesstimate:
+      size *= 2.8;
+    }
+
+    reader = new BufferedReader(new InputStreamReader(is, "UTF-8"), BUFFER_SIZE);
+
+    // Override sizes for currently "known" line files:
+    if (path.equals("europarl.lines.txt.gz")) {
+      size = 15129506L;
+    } else if (path.equals("/home/hudson/lucene-data/enwiki.random.lines.txt.gz")) {
+      size = 3038178822L;
+    }
+
+    // Randomly seek to starting point:
+    if (random != null && size > 3) {
+      final long seekTo = (random.nextLong()&Long.MAX_VALUE) % (size/3);
+      if (LuceneTestCase.VERBOSE) {
+        System.out.println("TEST: LineFileDocs: seek to fp=" + seekTo + " on open");
+      }
+      reader.skip(seekTo);
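+      // discard the (probably partial) line we landed on, so nextDoc() starts at a line boundary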
+      reader.readLine();
+    }
+  }
+
+  public synchronized void reset(Random random) throws IOException {
+    close();
+    open(random);
+    id.set(0);
+  }
+
+  private final static char SEP = '\t';
+
+  private static final class DocState {
+    final Document doc;
+    final Field titleTokenized;
+    final Field title;
+    final Field body;
+    final Field id;
+    final Field date;
+
+    public DocState() {
+      doc = new Document();
+      
+      title = new Field("title", "", Field.Store.NO, Field.Index.NOT_ANALYZED_NO_NORMS);
+      doc.add(title);
+
+      titleTokenized = new Field("titleTokenized", "", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
+      doc.add(titleTokenized);
+
+      body = new Field("body", "", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
+      doc.add(body);
+
+      id = new Field("docid", "", Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS);
+      doc.add(id);
+
+      date = new Field("date", "", Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS);
+      doc.add(date);
+    }
+  }
+
+  private final ThreadLocal<DocState> threadDocs = new ThreadLocal<DocState>();
+
+  /** Note: Document instance is re-used per-thread */
+  public Document nextDoc() throws IOException {
+    String line;
+    synchronized(this) {
+      line = reader.readLine();
+      if (line == null) {
+        // Always rewind at end:
+        if (LuceneTestCase.VERBOSE) {
+          System.out.println("TEST: LineFileDocs: now rewind file...");
+        }
+        close();
+        open(null);
+        line = reader.readLine();
+      }
+    }
+
+    DocState docState = threadDocs.get();
+    if (docState == null) {
+      docState = new DocState();
+      threadDocs.set(docState);
+    }
+
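+    // Each line is expected to look like: title <TAB> date <TAB> body
+    // (matching the substring extraction below).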
+    int spot = line.indexOf(SEP);
+    if (spot == -1) {
+      throw new RuntimeException("line: [" + line + "] is in an invalid format !");
+    }
+    int spot2 = line.indexOf(SEP, 1 + spot);
+    if (spot2 == -1) {
+      throw new RuntimeException("line: [" + line + "] is in an invalid format !");
+    }
+
+    docState.body.setValue(line.substring(1+spot2, line.length()));
+    final String title = line.substring(0, spot);
+    docState.title.setValue(title);
+    docState.titleTokenized.setValue(title);
+    docState.date.setValue(line.substring(1+spot, spot2));
+    docState.id.setValue(Integer.toString(id.getAndIncrement()));
+    return docState.doc;
+  }
+}
diff --git a/lucene/backwards/src/test-framework/org/apache/lucene/util/LuceneJUnitDividingSelector.java b/lucene/backwards/src/test-framework/org/apache/lucene/util/LuceneJUnitDividingSelector.java
new file mode 100644
index 0000000..5a9509c
--- /dev/null
+++ b/lucene/backwards/src/test-framework/org/apache/lucene/util/LuceneJUnitDividingSelector.java
@@ -0,0 +1,66 @@
+/**
+ *  Licensed to the Apache Software Foundation (ASF) under one or more
+ *  contributor license agreements.  See the NOTICE file distributed with
+ *  this work for additional information regarding copyright ownership.
+ *  The ASF licenses this file to You under the Apache License, Version 2.0
+ *  (the "License"); you may not use this file except in compliance with
+ *  the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ *
+ */
+package org.apache.lucene.util;
+import java.io.File;
+
+import org.apache.tools.ant.BuildException;
+import org.apache.tools.ant.types.Parameter;
+import org.apache.tools.ant.types.selectors.BaseExtendSelector;
+
+/** Divides filesets into equal groups */
+public class LuceneJUnitDividingSelector extends BaseExtendSelector {
+  private int counter;
+  /** Number of total parts to split. */
+  private int divisor;
+  /** Current part to accept. */
+  private int part;
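+  // Example (illustrative): with divisor=3 and part=2, isSelected() accepts the 2nd, 5th,
+  // 8th, ... file, so runs with part=1..3 together cover the whole fileset exactly once.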
+
+  @Override
+  public void setParameters(Parameter[] pParameters) {
+    super.setParameters(pParameters);
+    for (int j = 0; j < pParameters.length; j++) {
+      Parameter p = pParameters[j];
+      if ("divisor".equalsIgnoreCase(p.getName())) {
+        divisor = Integer.parseInt(p.getValue());
+      }
+      else if ("part".equalsIgnoreCase(p.getName())) {
+        part = Integer.parseInt(p.getValue());
+      }
+      else {
+        throw new BuildException("unknown " + p.getName());
+      }
+    }
+  }
+
+  @Override
+  public void verifySettings() {
+    super.verifySettings();
+    if (divisor <= 0 || part <= 0) {
+      throw new BuildException("part or divisor not set");
+    }
+    if (part > divisor) {
+      throw new BuildException("part must be <= divisor");
+    }
+  }
+
+  @Override
+  public boolean isSelected(File dir, String name, File path) {
+    counter = counter % divisor + 1;
+    return counter == part;
+  }
+}
diff --git a/lucene/backwards/src/test-framework/org/apache/lucene/util/LuceneJUnitResultFormatter.java b/lucene/backwards/src/test-framework/org/apache/lucene/util/LuceneJUnitResultFormatter.java
new file mode 100644
index 0000000..a03f780
--- /dev/null
+++ b/lucene/backwards/src/test-framework/org/apache/lucene/util/LuceneJUnitResultFormatter.java
@@ -0,0 +1,293 @@
+/**
+ *  Licensed to the Apache Software Foundation (ASF) under one or more
+ *  contributor license agreements.  See the NOTICE file distributed with
+ *  this work for additional information regarding copyright ownership.
+ *  The ASF licenses this file to You under the Apache License, Version 2.0
+ *  (the "License"); you may not use this file except in compliance with
+ *  the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ *
+ */
+
+package org.apache.lucene.util;
+
+import java.io.ByteArrayOutputStream;
+import java.io.File;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.text.NumberFormat;
+import java.util.logging.LogManager;
+
+import junit.framework.AssertionFailedError;
+import junit.framework.Test;
+
+import org.apache.lucene.store.LockReleaseFailedException;
+import org.apache.lucene.store.NativeFSLockFactory;
+import org.apache.tools.ant.taskdefs.optional.junit.JUnitResultFormatter;
+import org.apache.tools.ant.taskdefs.optional.junit.JUnitTest;
+import org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner;
+import org.apache.tools.ant.util.FileUtils;
+import org.apache.tools.ant.util.StringUtils;
+import org.junit.Ignore;
+
+/**
+ * Just like the "brief" BriefJUnitResultFormatter bundled with Ant,
+ * except all formatted text is buffered until the test suite is finished.
+ * At this point, the output is written at once in synchronized fashion.
+ * This way tests can run in parallel without interleaving output.
+ */
+public class LuceneJUnitResultFormatter implements JUnitResultFormatter {
+  private static final double ONE_SECOND = 1000.0;
+  
+  private static final NativeFSLockFactory lockFactory;
+  
+  /** Where to write the log to. */
+  private OutputStream out;
+  
+  /** Formatter for timings. */
+  private NumberFormat numberFormat = NumberFormat.getInstance();
+  
+  /** Output the suite has written to System.out */
+  private String systemOutput = null;
+  
+  /** Output the suite has written to System.err */
+  private String systemError = null;
+  
+  /** Buffer output until the end of the test */
+  private ByteArrayOutputStream sb; // use a BOS for our mostly ascii-output
+
+  private static final org.apache.lucene.store.Lock lock;
+
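+  // The lock below is file-system based (NativeFSLockFactory under java.io.tmpdir), so
+  // formatters running in separately forked JVMs also serialize their suite output,
+  // not just threads within one JVM.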
+  static {
+    File lockDir = new File(System.getProperty("java.io.tmpdir"),
+        "lucene_junit_lock");
+    lockDir.mkdirs();
+    if (!lockDir.exists()) {
+      throw new RuntimeException("Could not make Lock directory:" + lockDir);
+    }
+    try {
+      lockFactory = new NativeFSLockFactory(lockDir);
+      lock = lockFactory.makeLock("junit_lock");
+    } catch (IOException e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  /** Constructor for LuceneJUnitResultFormatter. */
+  public LuceneJUnitResultFormatter() {
+  }
+  
+  /**
+   * Sets the stream the formatter is supposed to write its results to.
+   * @param out the output stream to write to
+   */
+  public void setOutput(OutputStream out) {
+    this.out = out;
+  }
+  
+  /**
+   * @see JUnitResultFormatter#setSystemOutput(String)
+   */
+  /** {@inheritDoc}. */
+  public void setSystemOutput(String out) {
+    systemOutput = out;
+  }
+  
+  /**
+   * @see JUnitResultFormatter#setSystemError(String)
+   */
+  /** {@inheritDoc}. */
+  public void setSystemError(String err) {
+    systemError = err;
+  }
+  
+  
+  /**
+   * The whole testsuite started.
+   * @param suite the test suite
+   */
+  public synchronized void startTestSuite(JUnitTest suite) {
+    if (out == null) {
+      return; // Quick return - no output, do nothing.
+    }
+    sb = new ByteArrayOutputStream(); // don't reuse, so it's gc'ed
+    try {
+      LogManager.getLogManager().readConfiguration();
+    } catch (Exception e) {}
+    append("Testsuite: ");
+    append(suite.getName());
+    append(StringUtils.LINE_SEP);
+  }
+  
+  /**
+   * The whole testsuite ended.
+   * @param suite the test suite
+   */
+  public synchronized void endTestSuite(JUnitTest suite) {
+    append("Tests run: ");
+    append(suite.runCount());
+    append(", Failures: ");
+    append(suite.failureCount());
+    append(", Errors: ");
+    append(suite.errorCount());
+    append(", Time elapsed: ");
+    append(numberFormat.format(suite.getRunTime() / ONE_SECOND));
+    append(" sec");
+    append(StringUtils.LINE_SEP);
+    append(StringUtils.LINE_SEP);
+    
+    // append the err and output streams to the log
+    if (systemOutput != null && systemOutput.length() > 0) {
+      append("------------- Standard Output ---------------")
+      .append(StringUtils.LINE_SEP)
+      .append(systemOutput)
+      .append("------------- ---------------- ---------------")
+      .append(StringUtils.LINE_SEP);
+    }
+    
+    // HACK: junit gives us no way to do this in LuceneTestCase
+    try {
+      Class<?> clazz = Class.forName(suite.getName());
+      Ignore ignore = clazz.getAnnotation(Ignore.class);
+      if (ignore != null) {
+        if (systemError == null) systemError = "";
+        systemError += "NOTE: Ignoring test class '" + clazz.getSimpleName() + "': " 
+                    + ignore.value() + StringUtils.LINE_SEP;
+      }
+    } catch (ClassNotFoundException e) { /* no problem */ }
+    // END HACK
+    
+    if (systemError != null && systemError.length() > 0) {
+      append("------------- Standard Error -----------------")
+      .append(StringUtils.LINE_SEP)
+      .append(systemError)
+      .append("------------- ---------------- ---------------")
+      .append(StringUtils.LINE_SEP);
+    }
+    
+    if (out != null) {
+      try {
+        lock.obtain(5000);
+        try {
+          sb.writeTo(out);
+          out.flush();
+        } finally {
+          try {
+            lock.release();
+          } catch(LockReleaseFailedException e) {
+            // well, let's pretend it's released anyway
+          }
+        }
+      } catch (IOException e) {
+        throw new RuntimeException("unable to write results", e);
+      } finally {
+        if (out != System.out && out != System.err) {
+          FileUtils.close(out);
+        }
+      }
+    }
+  }
+  
+  /**
+   * A test started.
+   * @param test a test
+   */
+  public void startTest(Test test) {
+  }
+  
+  /**
+   * A test ended.
+   * @param test a test
+   */
+  public void endTest(Test test) {
+  }
+  
+  /**
+   * Interface TestListener for JUnit &lt;= 3.4.
+   *
+   * <p>A Test failed.
+   * @param test a test
+   * @param t    the exception thrown by the test
+   */
+  public void addFailure(Test test, Throwable t) {
+    formatError("\tFAILED", test, t);
+  }
+  
+  /**
+   * Interface TestListener for JUnit &gt; 3.4.
+   *
+   * <p>A Test failed.
+   * @param test a test
+   * @param t    the assertion failed by the test
+   */
+  public void addFailure(Test test, AssertionFailedError t) {
+    addFailure(test, (Throwable) t);
+  }
+  
+  /**
+   * A test caused an error.
+   * @param test  a test
+   * @param error the error thrown by the test
+   */
+  public void addError(Test test, Throwable error) {
+    formatError("\tCaused an ERROR", test, error);
+  }
+  
+  /**
+   * Format the test for printing.
+   * @param test a test
+   * @return the formatted testname
+   */
+  protected String formatTest(Test test) {
+    if (test == null) {
+      return "Null Test: ";
+    } else {
+      return "Testcase: " + test.toString() + ":";
+    }
+  }
+  
+  /**
+   * Format an error and print it.
+   * @param type the type of error
+   * @param test the test that failed
+   * @param error the exception that the test threw
+   */
+  protected synchronized void formatError(String type, Test test,
+      Throwable error) {
+    if (test != null) {
+      endTest(test);
+    }
+    
+    append(formatTest(test) + type);
+    append(StringUtils.LINE_SEP);
+    append(error.getMessage());
+    append(StringUtils.LINE_SEP);
+    String strace = JUnitTestRunner.getFilteredTrace(error);
+    append(strace);
+    append(StringUtils.LINE_SEP);
+    append(StringUtils.LINE_SEP);
+  }
+
+  public LuceneJUnitResultFormatter append(String s) {
+    if (s == null)
+      s = "(null)";
+    try {
+      sb.write(s.getBytes()); // intentionally use the default charset, it's a console.
+    } catch (IOException e) {
+      throw new RuntimeException(e);
+    }
+    return this;
+  }
+  
+  public LuceneJUnitResultFormatter append(long l) {
+    return append(Long.toString(l));
+  }
+}
+
diff --git a/lucene/backwards/src/test-framework/org/apache/lucene/util/LuceneTestCase.java b/lucene/backwards/src/test-framework/org/apache/lucene/util/LuceneTestCase.java
new file mode 100644
index 0000000..55ea4e1
--- /dev/null
+++ b/lucene/backwards/src/test-framework/org/apache/lucene/util/LuceneTestCase.java
@@ -0,0 +1,1253 @@
+package org.apache.lucene.util;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.File;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.lang.annotation.Documented;
+import java.lang.annotation.Inherited;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.reflect.Constructor;
+import java.util.*;
+import java.util.Map.Entry;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.document.Field.Index;
+import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.document.Field.TermVector;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.LogByteSizeMergePolicy;
+import org.apache.lucene.index.LogDocMergePolicy;
+import org.apache.lucene.index.LogMergePolicy;
+import org.apache.lucene.index.MergePolicy;
+import org.apache.lucene.index.MockRandomMergePolicy;
+import org.apache.lucene.index.SerialMergeScheduler;
+import org.apache.lucene.index.SlowMultiReaderWrapper;
+import org.apache.lucene.index.TieredMergePolicy;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.FieldCache.CacheEntry;
+import org.apache.lucene.search.AssertingIndexSearcher;
+import org.apache.lucene.search.FieldCache;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.FSDirectory;
+import org.apache.lucene.store.LockFactory;
+import org.apache.lucene.store.MockDirectoryWrapper;
+import org.apache.lucene.util.FieldCacheSanityChecker.Insanity;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.Assume;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Ignore;
+import org.junit.Rule;
+import org.junit.rules.TestWatchman;
+import org.junit.runner.RunWith;
+import org.junit.runners.model.FrameworkMethod;
+
+/**
+ * Base class for all Lucene unit tests, JUnit3 or JUnit4 variant.
+ * <p>
+ * If you override either <code>setUp()</code> or <code>tearDown()</code> in
+ * your unit test, make sure you call <code>super.setUp()</code> and
+ * <code>super.tearDown()</code>.
+ * </p>
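+ * <p>
+ * For example, a minimal sketch of the required pattern:
+ * </p>
+ * <pre>
+ *   &#64;Override
+ *   public void setUp() throws Exception {
+ *     super.setUp();
+ *     // your test-specific setup
+ *   }
+ * </pre>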
+ *
+ * <code>@Before</code> - replaces setUp
+ * <code>@After</code> - replaces tearDown
+ * <code>@Test</code> - any public method with this annotation is a test case, regardless
+ * of its name
+ * <p>
+ * See JUnit4 <a href="http://junit.org/junit/javadoc/4.7/">documentation</a> for a complete list of features.
+ * <p>
+ * Import from org.junit rather than junit.framework.
+ * <p>
+ * You should be able to use this class anywhere you used LuceneTestCase
+ * if you annotate your derived class correctly with the annotations above.
+ * @see #assertSaneFieldCaches(String)
+ */
+
+@RunWith(LuceneTestCaseRunner.class)
+public abstract class LuceneTestCase extends Assert {
+
+  /**
+   * true iff tests are run in verbose mode. Note: if it is false, tests are not
+   * expected to print any messages.
+   */
+  public static final boolean VERBOSE = Boolean.getBoolean("tests.verbose");
+
+  /** Use this constant when creating Analyzers and any other version-dependent stuff.
+   * <p><b>NOTE:</b> Change this when development starts for new Lucene version:
+   */
+  public static final Version TEST_VERSION_CURRENT = Version.LUCENE_34;
+
+  /**
+   * If this is set, it is the only method that should run.
+   */
+  static final String TEST_METHOD;
+  
+  /** Create indexes in this directory; ideally use a subdir named after the test */
+  public static final File TEMP_DIR;
+  static {
+    String method = System.getProperty("testmethod", "").trim();
+    TEST_METHOD = method.length() == 0 ? null : method;
+    String s = System.getProperty("tempDir", System.getProperty("java.io.tmpdir"));
+    if (s == null)
+      throw new RuntimeException("To run tests, you need to define system property 'tempDir' or 'java.io.tmpdir'.");
+    TEMP_DIR = new File(s);
+    TEMP_DIR.mkdirs();
+  }
+  
+  /** set of directories we created, in afterclass we try to clean these up */
+  private static final Map<File, StackTraceElement[]> tempDirs = Collections.synchronizedMap(new HashMap<File, StackTraceElement[]>());
+
+  // by default we randomly pick a different codec for
+  // each test case (non-J4 tests) and each test class (J4
+  // tests)
+  /** Gets the locale to run tests with */
+  public static final String TEST_LOCALE = System.getProperty("tests.locale", "random");
+  /** Gets the timezone to run tests with */
+  public static final String TEST_TIMEZONE = System.getProperty("tests.timezone", "random");
+  /** Gets the directory to run tests with */
+  public static final String TEST_DIRECTORY = System.getProperty("tests.directory", "random");
+  /** Get the number of times to run tests */
+  public static final int TEST_ITER = Integer.parseInt(System.getProperty("tests.iter", "1"));
+  /** Get the minimum number of times to run tests until a failure happens */
+  public static final int TEST_ITER_MIN = Integer.parseInt(System.getProperty("tests.iter.min", Integer.toString(TEST_ITER)));
+  /** Get the random seed for tests */
+  public static final String TEST_SEED = System.getProperty("tests.seed", "random");
+  /** whether or not nightly tests should run */
+  public static final boolean TEST_NIGHTLY = Boolean.parseBoolean(System.getProperty("tests.nightly", "false"));
+  /** the line file used by LineFileDocs */
+  public static final String TEST_LINE_DOCS_FILE = System.getProperty("tests.linedocsfile", "europarl.lines.txt.gz");
+  /** whether or not to clean threads between test invocations: "false", "perMethod", "perClass" */
+  public static final String TEST_CLEAN_THREADS = System.getProperty("tests.cleanthreads", "perClass");
+
+  /**
+   * A random multiplier which you should use when writing random tests:
+   * multiply it by the number of iterations
+   */
+  public static final int RANDOM_MULTIPLIER = Integer.parseInt(System.getProperty("tests.multiplier", "1"));
+  
+  private int savedBoolMaxClauseCount = BooleanQuery.getMaxClauseCount();
+
+  private volatile Thread.UncaughtExceptionHandler savedUncaughtExceptionHandler = null;
+  
+  /** Used to track if setUp and tearDown are called correctly from subclasses */
+  private static State state = State.INITIAL;
+
+  private static enum State {
+    INITIAL, // no tests ran yet
+    SETUP,   // test has called setUp()
+    RANTEST, // test is running
+    TEARDOWN // test has called tearDown()
+  }
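+  // Lifecycle: state goes INITIAL -> SETUP (setUp) -> RANTEST (TestWatchman.starting)
+  // -> TEARDOWN (tearDown); the assertions in setUp/tearDown/afterClass check these
+  // transitions to catch subclasses that forget to call super.setUp()/super.tearDown().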
+  
+  private static class UncaughtExceptionEntry {
+    public final Thread thread;
+    public final Throwable exception;
+    
+    public UncaughtExceptionEntry(Thread thread, Throwable exception) {
+      this.thread = thread;
+      this.exception = exception;
+    }
+  }
+  private List<UncaughtExceptionEntry> uncaughtExceptions = Collections.synchronizedList(new ArrayList<UncaughtExceptionEntry>());
+  
+  private static Locale locale;
+  private static Locale savedLocale;
+  private static TimeZone timeZone;
+  private static TimeZone savedTimeZone;
+  
+  protected static Map<MockDirectoryWrapper,StackTraceElement[]> stores;
+  
+  /** @deprecated: until we fix no-fork problems in solr tests */
+  @Deprecated
+  static List<String> testClassesRun = new ArrayList<String>();
+  
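+  // A fixed "tests.seed" is parsed by ThreeLongs into three values: l1 seeds the static
+  // (per-class) random here, l2 the per-method random in setUp(), and l3 the method
+  // shuffling in LuceneTestCaseRunner.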
+  private static void initRandom() {
+    assert !random.initialized;
+    staticSeed = "random".equals(TEST_SEED) ? seedRand.nextLong() : ThreeLongs.fromString(TEST_SEED).l1;
+    random.setSeed(staticSeed);
+    random.initialized = true;
+  }
+  
+  @Deprecated
+  private static boolean icuTested = false;
+
+  @BeforeClass
+  public static void beforeClassLuceneTestCaseJ4() {
+    initRandom();
+    state = State.INITIAL;
+    tempDirs.clear();
+    stores = Collections.synchronizedMap(new IdentityHashMap<MockDirectoryWrapper,StackTraceElement[]>());
+    // enable this by default, for IDE consistency with ant tests (as it's the default from ant)
+    // TODO: really should be in solr base classes, but some extend LTC directly.
+    // we do this in beforeClass, because some tests currently disable it
+    if (System.getProperty("solr.directoryFactory") == null) {
+      System.setProperty("solr.directoryFactory", "org.apache.solr.core.MockDirectoryFactory");
+    }
+    // this code consumes randoms where 4.0's lucenetestcase would: to make seeds work across both branches.
+    // TODO: doesn't completely work, because what if we get mockrandom codec?!
+    if (random.nextInt(4) != 0) {
+      random.nextInt(); // consume RandomCodecProvider's seed.
+    }
+    // end compatibility random-consumption
+    
+    savedLocale = Locale.getDefault();
+    
+    // START hack to init ICU safely before we randomize locales.
+    // ICU fails during classloading when a special Java7-only locale is the default
+    // see: http://bugs.icu-project.org/trac/ticket/8734
+    if (!icuTested) {
+      icuTested = true;
+      try {
+        Locale.setDefault(Locale.US);
+        Class.forName("com.ibm.icu.util.ULocale");
+      } catch (ClassNotFoundException cnfe) {
+        // ignore if no ICU is in classpath
+      }
+    }
+    // END hack
+    
+    locale = TEST_LOCALE.equals("random") ? randomLocale(random) : localeForName(TEST_LOCALE);
+    Locale.setDefault(locale);
+    savedTimeZone = TimeZone.getDefault();
+    timeZone = TEST_TIMEZONE.equals("random") ? randomTimeZone(random) : TimeZone.getTimeZone(TEST_TIMEZONE);
+    TimeZone.setDefault(timeZone);
+    testsFailed = false;
+  }
+  
+  @AfterClass
+  public static void afterClassLuceneTestCaseJ4() {
+    State oldState = state; // capture test execution state
+    state = State.INITIAL; // set the state for subsequent tests
+    
+    Throwable problem = null;
+    try {
+      if (!testsFailed) {
+        assertTrue("ensure your setUp() calls super.setUp() and your tearDown() calls super.tearDown()!!!", 
+          oldState == State.INITIAL || oldState == State.TEARDOWN);
+      }
+    } catch (Throwable t) {
+      if (problem == null) problem = t;
+    }
+    
+    if (! "false".equals(TEST_CLEAN_THREADS)) {
+      int rogueThreads = threadCleanup("test class");
+      if (rogueThreads > 0) {
+        // TODO: fail here once the leaks are fixed.
+        System.err.println("RESOURCE LEAK: test class left " + rogueThreads + " thread(s) running");
+      }
+    }
+
+    Locale.setDefault(savedLocale);
+    TimeZone.setDefault(savedTimeZone);
+    System.clearProperty("solr.solr.home");
+    System.clearProperty("solr.data.dir");
+    
+    try {
+      // now look for unclosed resources
+      if (!testsFailed) {
+        checkResourcesAfterClass();
+      }
+    } catch (Throwable t) {
+      if (problem == null) problem = t;
+    }
+    
+    stores = null;
+
+    try {
+      // clear out any temp directories if we can
+      if (!testsFailed) {
+        clearTempDirectoriesAfterClass();
+      }
+    } catch (Throwable t) {
+      if (problem == null) problem = t;
+    }
+
+    // if we had afterClass failures, get some debugging information
+    if (problem != null) {
+      reportPartialFailureInfo();      
+    }
+    
+    // if verbose or tests failed, report some information back
+    if (VERBOSE || testsFailed || problem != null) {
+      printDebuggingInformation();
+    }
+    
+    // reset seed
+    random.setSeed(0L);
+    random.initialized = false;
+    
+    if (problem != null) {
+      throw new RuntimeException(problem);
+    }
+  }
+  
+  /** print some useful debugging information about the environment */
+  private static void printDebuggingInformation() {
+    System.err.println("NOTE: test params are: " +
+        "locale=" + locale +
+        ", timezone=" + (timeZone == null ? "(null)" : timeZone.getID()));
+    System.err.println("NOTE: all tests run in this JVM:");
+    System.err.println(Arrays.toString(testClassesRun.toArray()));
+    System.err.println("NOTE: " + System.getProperty("os.name") + " "
+        + System.getProperty("os.version") + " "
+        + System.getProperty("os.arch") + "/"
+        + System.getProperty("java.vendor") + " "
+        + System.getProperty("java.version") + " "
+        + (Constants.JRE_IS_64BIT ? "(64-bit)" : "(32-bit)") + "/"
+        + "cpus=" + Runtime.getRuntime().availableProcessors() + ","
+        + "threads=" + Thread.activeCount() + ","
+        + "free=" + Runtime.getRuntime().freeMemory() + ","
+        + "total=" + Runtime.getRuntime().totalMemory());
+  }
+  
+  /** check that directories and their resources were closed */
+  private static void checkResourcesAfterClass() {
+    for (MockDirectoryWrapper d : stores.keySet()) {
+      if (d.isOpen()) {
+        StackTraceElement elements[] = stores.get(d);
+        // Look for the first class that is not LuceneTestCase that requested
+        // a Directory. The first two items are of Thread's, so skipping over
+        // them.
+        StackTraceElement element = null;
+        for (int i = 2; i < elements.length; i++) {
+          StackTraceElement ste = elements[i];
+          if (ste.getClassName().indexOf("LuceneTestCase") == -1) {
+            element = ste;
+            break;
+          }
+        }
+        fail("directory of test was not closed, opened from: " + element);
+      }
+    }
+  }
+  
+  /** clear temp directories: this will fail if it's not successful */
+  private static void clearTempDirectoriesAfterClass() {
+    for (Entry<File, StackTraceElement[]> entry : tempDirs.entrySet()) {
+      try {
+        _TestUtil.rmDir(entry.getKey());
+      } catch (IOException e) {
+        e.printStackTrace();
+        System.err.println("path " + entry.getKey() + " allocated from");
+        // first two STE's are Java's
+        StackTraceElement[] elements = entry.getValue();
+        for (int i = 2; i < elements.length; i++) {
+          StackTraceElement ste = elements[i];            
+          // print only our code's stack information
+          if (ste.getClassName().indexOf("org.apache.lucene") == -1) break; 
+          System.err.println("\t" + ste);
+        }
+        fail("could not remove temp dir: " + entry.getKey());
+      }
+    }
+  }
+
+  protected static boolean testsFailed; /* true if any tests failed */
+  
+  // This is how we get control when errors occur.
+  // Think of this as start/end/success/failed
+  // events.
+  @Rule
+  public final TestWatchman intercept = new TestWatchman() {
+
+    @Override
+    public void failed(Throwable e, FrameworkMethod method) {
+      // org.junit.internal.AssumptionViolatedException in older releases
+      // org.junit.Assume.AssumptionViolatedException in recent ones
+      if (e.getClass().getName().endsWith("AssumptionViolatedException")) {
+        if (e.getCause() instanceof _TestIgnoredException)
+          e = e.getCause();
+        System.err.print("NOTE: Assume failed in '" + method.getName() + "' (ignored):");
+        if (VERBOSE) {
+          System.err.println();
+          e.printStackTrace(System.err);
+        } else {
+          System.err.print(" ");
+          System.err.println(e.getMessage());
+        }
+      } else {
+        testsFailed = true;
+        reportAdditionalFailureInfo();
+      }
+      super.failed(e, method);
+    }
+
+    @Override
+    public void starting(FrameworkMethod method) {
+      // set current method name for logging
+      LuceneTestCase.this.name = method.getName();
+      State s = state; // capture test execution state
+      state = State.RANTEST; // set the state for subsequent tests
+      if (!testsFailed) {
+        assertTrue("ensure your setUp() calls super.setUp()!!!", s == State.SETUP);
+      }
+      super.starting(method);
+    }
+  };
+
+  @Before
+  public void setUp() throws Exception {
+    seed = "random".equals(TEST_SEED) ? seedRand.nextLong() : ThreeLongs.fromString(TEST_SEED).l2;
+    random.setSeed(seed);
+    State s = state; // capture test execution state
+    state = State.SETUP; // set the state for subsequent tests
+   
+    savedUncaughtExceptionHandler = Thread.getDefaultUncaughtExceptionHandler();
+    Thread.setDefaultUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler() {
+      public void uncaughtException(Thread t, Throwable e) {
+        testsFailed = true;
+        uncaughtExceptions.add(new UncaughtExceptionEntry(t, e));
+        if (savedUncaughtExceptionHandler != null)
+          savedUncaughtExceptionHandler.uncaughtException(t, e);
+        }
+    });
+    
+    savedBoolMaxClauseCount = BooleanQuery.getMaxClauseCount();
+
+    if (!testsFailed) {
+      assertTrue("ensure your tearDown() calls super.tearDown()!!!", (s == State.INITIAL || s == State.TEARDOWN));
+    }
+  }
+
+  /**
+   * Forcibly purges all cache entries from the FieldCache.
+   * <p>
+   * This method will be called by tearDown to clean up FieldCache.DEFAULT.
+   * If a (poorly written) test has some expectation that the FieldCache
+   * will persist across test methods (ie: a static IndexReader) this
+   * method can be overridden to do nothing.
+   * </p>
+   *
+   * @see FieldCache#purgeAllCaches()
+   */
+  protected void purgeFieldCache(final FieldCache fc) {
+    fc.purgeAllCaches();
+  }
+
+  protected String getTestLabel() {
+    return getClass().getName() + "." + getName();
+  }
+
+  public static void setUseCompoundFile(MergePolicy mp, boolean useCompound) {
+    if (mp instanceof LogMergePolicy) {
+      ((LogMergePolicy) mp).setUseCompoundFile(useCompound);
+    } else if (mp instanceof TieredMergePolicy) {
+      ((TieredMergePolicy) mp).setUseCompoundFile(useCompound);
+    } else {
+      fail("MergePolicy (compound-file) not supported " + mp);
+    }
+  }
+
+  public static void setMergeFactor(MergePolicy mp, int mergeFactor) {
+    if (mp instanceof LogMergePolicy) {
+      ((LogMergePolicy) mp).setMergeFactor(mergeFactor);
+    } else if (mp instanceof TieredMergePolicy) {
+      ((TieredMergePolicy) mp).setMaxMergeAtOnce(mergeFactor);
+      ((TieredMergePolicy) mp).setMaxMergeAtOnceExplicit(mergeFactor);
+    } else {
+      fail("MergePolicy not supported " + mp);
+    }
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    State oldState = state; // capture test execution state
+    state = State.TEARDOWN; // set the state for subsequent tests
+    
+    // NOTE: with junit 4.7, we don't get a reproduceWith because our Watchman
+    // does not know if something fails in tearDown. so we ensure this happens ourselves for now.
+    // we can remove this if we upgrade to 4.8
+    Throwable problem = null;
+    
+    try {
+      if (!testsFailed) {
+        // Note: we allow a test to go straight from SETUP -> TEARDOWN (without ever entering the RANTEST state)
+        // because if you assume() inside setUp(), it skips the test and the TestWatchman has no way to know...
+        assertTrue("ensure your setUp() calls super.setUp()!!!", oldState == State.RANTEST || oldState == State.SETUP);
+      }
+    } catch (Throwable t) {
+      if (problem == null) problem = t;
+    }
+
+    BooleanQuery.setMaxClauseCount(savedBoolMaxClauseCount);
+
+    // this won't throw any exceptions or fail the test
+    // if we change this, then change this logic
+    checkRogueThreadsAfter();
+    // restore the default uncaught exception handler
+    Thread.setDefaultUncaughtExceptionHandler(savedUncaughtExceptionHandler);
+    
+    try {
+      checkUncaughtExceptionsAfter();
+    } catch (Throwable t) {
+      if (problem == null) problem = t;
+    }
+    
+    try {
+      // calling assertSaneFieldCaches here isn't as useful as having test 
+      // classes call it directly from the scope where the index readers 
+      // are used, because they could be gc'ed just before this tearDown 
+      // method is called.
+      //
+      // But it's better than nothing.
+      //
+      // If you are testing functionality that you know for a fact 
+      // "violates" FieldCache sanity, then you should either explicitly 
+      // call purgeFieldCache at the end of your test method, or refactor
+      // your Test class so that the inconsistent FieldCache usages are
+      // isolated in distinct test methods  
+      assertSaneFieldCaches(getTestLabel());
+    } catch (Throwable t) {
+      if (problem == null) problem = t;
+    }
+    
+    purgeFieldCache(FieldCache.DEFAULT);
+    
+    if (problem != null) {
+      testsFailed = true;
+      reportAdditionalFailureInfo();
+      throw new RuntimeException(problem);
+    }
+  }
+  
+  /** check if the test still has threads running; we don't want them to
+   *  fail in a subsequent test and pass the blame to the wrong test */
+  private void checkRogueThreadsAfter() {
+    if ("perMethod".equals(TEST_CLEAN_THREADS)) {
+      int rogueThreads = threadCleanup("test method: '" + getName() + "'");
+      if (!testsFailed && rogueThreads > 0) {
+        System.err.println("RESOURCE LEAK: test method: '" + getName()
+            + "' left " + rogueThreads + " thread(s) running");
+        // TODO: fail, but print seed for now
+        if (uncaughtExceptions.isEmpty()) {
+          reportAdditionalFailureInfo();
+        }
+      }
+    }
+  }
+  
+  /** see if any other threads threw uncaught exceptions, and fail the test if so */
+  private void checkUncaughtExceptionsAfter() {
+    if (!uncaughtExceptions.isEmpty()) {
+      System.err.println("The following exceptions were thrown by threads:");
+      for (UncaughtExceptionEntry entry : uncaughtExceptions) {
+        System.err.println("*** Thread: " + entry.thread.getName() + " ***");
+        entry.exception.printStackTrace(System.err);
+      }
+      fail("Some threads threw uncaught exceptions!");
+    }
+  }
+
+  private final static int THREAD_STOP_GRACE_MSEC = 50;
+  // jvm-wide list of 'rogue threads' we found, so they only get reported once.
+  private final static IdentityHashMap<Thread,Boolean> rogueThreads = new IdentityHashMap<Thread,Boolean>();
+  
+  static {
+    // just a hack for things like eclipse test-runner threads
+    for (Thread t : Thread.getAllStackTraces().keySet()) {
+      rogueThreads.put(t, true);
+    }
+    
+    if (TEST_ITER > 1) {
+      System.out.println("WARNING: you are using -Dtests.iter=n where n > 1, not all tests support this option.");
+      System.out.println("Some may crash or fail: this is not a bug.");
+    }
+  }
+  
+  /**
+   * Looks for leftover running threads, trying to kill them off,
+   * so they don't fail future tests.
+   * Returns the number of rogue threads that it found.
+   */
+  private static int threadCleanup(String context) {
+    // educated guess
+    Thread[] stillRunning = new Thread[Thread.activeCount()+1];
+    int threadCount = 0;
+    int rogueCount = 0;
+    
+    if ((threadCount = Thread.enumerate(stillRunning)) > 1) {
+      while (threadCount == stillRunning.length) {
+        // truncated response
+        stillRunning = new Thread[stillRunning.length*2];
+        threadCount = Thread.enumerate(stillRunning);
+      }
+      
+      for (int i = 0; i < threadCount; i++) {
+        Thread t = stillRunning[i];
+          
+        if (t.isAlive() && 
+            !rogueThreads.containsKey(t) && 
+            t != Thread.currentThread() &&
+            // TODO: TimeLimitingCollector starts a thread statically.... WTF?!
+            !t.getName().equals("TimeLimitedCollector timer thread") &&
+            /* its ok to keep your searcher across test cases */
+            (t.getName().startsWith("LuceneTestCase") && context.startsWith("test method")) == false) {
+          System.err.println("WARNING: " + context  + " left thread running: " + t);
+          rogueThreads.put(t, true);
+          rogueCount++;
+          if (t.getName().startsWith("LuceneTestCase")) {
+            System.err.println("PLEASE CLOSE YOUR INDEXSEARCHERS IN YOUR TEST!!!!");
+            continue;
+          } else {
+            // wait on the thread to die of natural causes
+            try {
+              t.join(THREAD_STOP_GRACE_MSEC);
+            } catch (InterruptedException e) { e.printStackTrace(); }
+          }
+          // try to stop the thread:
+          t.setUncaughtExceptionHandler(null);
+          Thread.setDefaultUncaughtExceptionHandler(null);
+          t.interrupt();
+        }
+      }
+    }
+    return rogueCount;
+  }
+  
+  /**
+   * Asserts that FieldCacheSanityChecker does not detect any
+   * problems with FieldCache.DEFAULT.
+   * <p>
+   * If any problems are found, they are logged to System.err
+   * (along with the msg) when the Assertion is thrown.
+   * </p>
+   * <p>
+   * This method is called by tearDown after every test method,
+   * however IndexReaders scoped inside test methods may be garbage
+   * collected prior to this method being called, causing errors to
+   * be overlooked. Tests are encouraged to keep their IndexReaders
+   * scoped at the class level, or to explicitly call this method
+   * directly in the same scope as the IndexReader.
+   * </p>
+   *
+   * @see org.apache.lucene.util.FieldCacheSanityChecker
+   */
+  protected void assertSaneFieldCaches(final String msg) {
+    final CacheEntry[] entries = FieldCache.DEFAULT.getCacheEntries();
+    Insanity[] insanity = null;
+    try {
+      try {
+        insanity = FieldCacheSanityChecker.checkSanity(entries);
+      } catch (RuntimeException e) {
+        dumpArray(msg + ": FieldCache", entries, System.err);
+        throw e;
+      }
+
+      assertEquals(msg + ": Insane FieldCache usage(s) found",
+              0, insanity.length);
+      insanity = null;
+    } finally {
+
+      // report this in the event of any exception/failure
+      // if no failure, then insanity will be null anyway
+      if (null != insanity) {
+        dumpArray(msg + ": Insane FieldCache usage(s)", insanity, System.err);
+      }
+
+    }
+  }
+  
+  /**
+   * Returns a random number that is at least <code>i</code>.
+   * <p>
+   * The actual number returned will be influenced by whether {@link #TEST_NIGHTLY}
+   * is active and {@link #RANDOM_MULTIPLIER}, but also by some random fudge.
+   */
+  public static int atLeast(Random random, int i) {
+    int min = (TEST_NIGHTLY ? 3*i : i) * RANDOM_MULTIPLIER;
+    int max = min+(min/2);
+    return _TestUtil.nextInt(random, min, max);
+  }
+  
+  public static int atLeast(int i) {
+    return atLeast(random, i);
+  }
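+  // Example usage (a sketch): int numDocs = atLeast(100); // >= 100, scaled up for
+  // nightly runs and for a larger tests.multiplier, plus some random fudge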
+  
+  /**
+   * Returns true if something should happen rarely.
+   * <p>
+   * The actual number returned will be influenced by whether {@link #TEST_NIGHTLY}
+   * is active and {@link #RANDOM_MULTIPLIER}.
+   */
+  public static boolean rarely(Random random) {
+    int p = TEST_NIGHTLY ? 10 : 5;
+    p += (p * Math.log(RANDOM_MULTIPLIER));
+    int min = 100 - Math.min(p, 50); // never more than 50
+    return random.nextInt(100) >= min;
+  }
+  
+  public static boolean rarely() {
+    return rarely(random);
+  }
+  
+  public static boolean usually(Random random) {
+    return !rarely(random);
+  }
+  
+  public static boolean usually() {
+    return usually(random);
+  }
+
+  // These deprecated methods should be removed soon, once all tests that compare floating-point values without an epsilon are fixed:
+  
+  @Deprecated
+  static public void assertEquals(double expected, double actual) {
+    assertEquals(null, expected, actual);
+  }
+   
+  @Deprecated
+  static public void assertEquals(String message, double expected, double actual) {
+    assertEquals(message, Double.valueOf(expected), Double.valueOf(actual));
+  }
+
+  @Deprecated
+  static public void assertEquals(float expected, float actual) {
+    assertEquals(null, expected, actual);
+  }
+
+  @Deprecated
+  static public void assertEquals(String message, float expected, float actual) {
+    assertEquals(message, Float.valueOf(expected), Float.valueOf(actual));
+  }
+  
+  public static void assumeTrue(String msg, boolean b) {
+    Assume.assumeNoException(b ? null : new _TestIgnoredException(msg));
+  }
+ 
+  public static void assumeFalse(String msg, boolean b) {
+    assumeTrue(msg, !b);
+  }
+  
+  public static void assumeNoException(String msg, Exception e) {
+    Assume.assumeNoException(e == null ? null : new _TestIgnoredException(msg, e));
+  }
+ 
+  /**
+   * Convenience method for logging an iterator.
+   *
+   * @param label  String logged before/after the items in the iterator
+   * @param iter   Each next() is toString()ed and logged on its own line. If iter is null this is logged differently than an empty iterator.
+   * @param stream Stream to log messages to.
+   */
+  public static void dumpIterator(String label, Iterator<?> iter,
+                                  PrintStream stream) {
+    stream.println("*** BEGIN " + label + " ***");
+    if (null == iter) {
+      stream.println(" ... NULL ...");
+    } else {
+      while (iter.hasNext()) {
+        stream.println(iter.next().toString());
+      }
+    }
+    stream.println("*** END " + label + " ***");
+  }
+
+  /**
+   * Convenience method for logging an array. Wraps the array in an iterator and delegates.
+   *
+   * @see #dumpIterator(String,Iterator,PrintStream)
+   */
+  public static void dumpArray(String label, Object[] objs,
+                               PrintStream stream) {
+    Iterator<?> iter = (null == objs) ? null : Arrays.asList(objs).iterator();
+    dumpIterator(label, iter, stream);
+  }
+
+  /** create a new index writer config with random defaults */
+  public static IndexWriterConfig newIndexWriterConfig(Version v, Analyzer a) {
+    return newIndexWriterConfig(random, v, a);
+  }
+  
+  /** create a new index writer config with random defaults using the specified random */
+  public static IndexWriterConfig newIndexWriterConfig(Random r, Version v, Analyzer a) {
+    IndexWriterConfig c = new IndexWriterConfig(v, a);
+    if (r.nextBoolean()) {
+      c.setMergePolicy(newTieredMergePolicy());
+    } else if (r.nextBoolean()) {
+      c.setMergePolicy(newLogMergePolicy());
+    } else {
+      c.setMergePolicy(new MockRandomMergePolicy(r));
+    }
+    
+    if (r.nextBoolean()) {
+      c.setMergeScheduler(new SerialMergeScheduler());
+    }
+    if (r.nextBoolean()) {
+      if (rarely(r)) {
+        // crazy value
+        c.setMaxBufferedDocs(_TestUtil.nextInt(r, 2, 7));
+      } else {
+        // reasonable value
+        c.setMaxBufferedDocs(_TestUtil.nextInt(r, 8, 1000));
+      }
+    }
+    if (r.nextBoolean()) {
+      if (rarely(r)) {
+        // crazy value
+        c.setTermIndexInterval(r.nextBoolean() ? _TestUtil.nextInt(r, 1, 31) : _TestUtil.nextInt(r, 129, 1000));
+      } else {
+        // reasonable value
+        c.setTermIndexInterval(_TestUtil.nextInt(r, 32, 128));
+      }
+    }
+    if (r.nextBoolean()) {
+      c.setMaxThreadStates(_TestUtil.nextInt(r, 1, 20));
+    }
+    
+    if (r.nextBoolean()) {
+      c.setMergePolicy(new MockRandomMergePolicy(r));
+    } else {
+      c.setMergePolicy(newLogMergePolicy());
+    }
+    
+    c.setReaderPooling(r.nextBoolean());
+    c.setReaderTermsIndexDivisor(_TestUtil.nextInt(r, 1, 4));
+    return c;
+  }
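+  // Example usage (a sketch): IndexWriter w = new IndexWriter(dir,
+  //     newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer)); // analyzer is whatever the test needs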
+
+  public static LogMergePolicy newLogMergePolicy() {
+    return newLogMergePolicy(random);
+  }
+
+  public static TieredMergePolicy newTieredMergePolicy() {
+    return newTieredMergePolicy(random);
+  }
+
+  public static LogMergePolicy newLogMergePolicy(Random r) {
+    LogMergePolicy logmp = r.nextBoolean() ? new LogDocMergePolicy() : new LogByteSizeMergePolicy();
+    logmp.setUseCompoundFile(r.nextBoolean());
+    logmp.setCalibrateSizeByDeletes(r.nextBoolean());
+    if (rarely(r)) {
+      logmp.setMergeFactor(_TestUtil.nextInt(r, 2, 4));
+    } else {
+      logmp.setMergeFactor(_TestUtil.nextInt(r, 5, 50));
+    }
+    return logmp;
+  }
+
+  public static TieredMergePolicy newTieredMergePolicy(Random r) {
+    TieredMergePolicy tmp = new TieredMergePolicy();
+    if (rarely(r)) {
+      tmp.setMaxMergeAtOnce(_TestUtil.nextInt(r, 2, 4));
+      tmp.setMaxMergeAtOnceExplicit(_TestUtil.nextInt(r, 2, 4));
+    } else {
+      tmp.setMaxMergeAtOnce(_TestUtil.nextInt(r, 5, 50));
+      tmp.setMaxMergeAtOnceExplicit(_TestUtil.nextInt(r, 5, 50));
+    }
+    tmp.setMaxMergedSegmentMB(0.2 + r.nextDouble() * 2.0);
+    tmp.setFloorSegmentMB(0.2 + r.nextDouble() * 2.0);
+    tmp.setExpungeDeletesPctAllowed(0.0 + r.nextDouble() * 30.0);
+    tmp.setSegmentsPerTier(_TestUtil.nextInt(r, 2, 20));
+    tmp.setUseCompoundFile(r.nextBoolean());
+    tmp.setNoCFSRatio(0.1 + r.nextDouble()*0.8);
+    tmp.setReclaimDeletesWeight(r.nextDouble()*4);
+    return tmp;
+  }
+
+  public static LogMergePolicy newLogMergePolicy(boolean useCFS) {
+    LogMergePolicy logmp = newLogMergePolicy();
+    logmp.setUseCompoundFile(useCFS);
+    return logmp;
+  }
+
+  public static LogMergePolicy newLogMergePolicy(boolean useCFS, int mergeFactor) {
+    LogMergePolicy logmp = newLogMergePolicy();
+    logmp.setUseCompoundFile(useCFS);
+    logmp.setMergeFactor(mergeFactor);
+    return logmp;
+  }
+
+  public static LogMergePolicy newLogMergePolicy(int mergeFactor) {
+    LogMergePolicy logmp = newLogMergePolicy();
+    logmp.setMergeFactor(mergeFactor);
+    return logmp;
+  }
+
+  /**
+   * Returns a new Directory instance. Use this when the test does not
+   * care about the specific Directory implementation (most tests).
+   * <p>
+   * The Directory is wrapped with {@link MockDirectoryWrapper}.
+   * By default this means it will be picky, such as ensuring that you
+   * properly close it and all open files in your test. It will emulate
+   * some features of Windows, such as not allowing open files to be
+   * overwritten.
+   */
+  public static MockDirectoryWrapper newDirectory() throws IOException {
+    return newDirectory(random);
+  }
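+  // Typical test pattern (a sketch): Directory dir = newDirectory(); ... dir.close();
+  // leaving it open makes checkResourcesAfterClass() fail the test class.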
+  
+  /**
+   * Returns a new Directory instance, using the specified random.
+   * See {@link #newDirectory()} for more information.
+   */
+  public static MockDirectoryWrapper newDirectory(Random r) throws IOException {
+    Directory impl = newDirectoryImpl(r, TEST_DIRECTORY);
+    MockDirectoryWrapper dir = new MockDirectoryWrapper(r, impl);
+    stores.put(dir, Thread.currentThread().getStackTrace());
+    return dir;
+  }
+  
+  /**
+   * Returns a new Directory instance, with contents copied from the
+   * provided directory. See {@link #newDirectory()} for more
+   * information.
+   */
+  public static MockDirectoryWrapper newDirectory(Directory d) throws IOException {
+    return newDirectory(random, d);
+  }
+  
+  /** Returns a new FSDirectory instance over the given file, which must be a folder. */
+  public static MockDirectoryWrapper newFSDirectory(File f) throws IOException {
+    return newFSDirectory(f, null);
+  }
+  
+  /** Returns a new FSDirectory instance over the given file, which must be a folder. */
+  public static MockDirectoryWrapper newFSDirectory(File f, LockFactory lf) throws IOException {
+    String fsdirClass = TEST_DIRECTORY;
+    if (fsdirClass.equals("random")) {
+      fsdirClass = FS_DIRECTORIES[random.nextInt(FS_DIRECTORIES.length)];
+    }
+    
+    if (fsdirClass.indexOf(".") == -1) {// if not fully qualified, assume .store
+      fsdirClass = "org.apache.lucene.store." + fsdirClass;
+    }
+    
+    Class<? extends FSDirectory> clazz;
+    try {
+      try {
+        clazz = Class.forName(fsdirClass).asSubclass(FSDirectory.class);
+      } catch (ClassCastException e) {
+        // TEST_DIRECTORY is not a sub-class of FSDirectory, so draw one at random
+        fsdirClass = FS_DIRECTORIES[random.nextInt(FS_DIRECTORIES.length)];
+        
+        if (fsdirClass.indexOf(".") == -1) {// if not fully qualified, assume .store
+          fsdirClass = "org.apache.lucene.store." + fsdirClass;
+        }
+        
+        clazz = Class.forName(fsdirClass).asSubclass(FSDirectory.class);
+      }
+      MockDirectoryWrapper dir = new MockDirectoryWrapper(random, newFSDirectoryImpl(clazz, f));
+      if (lf != null) {
+        dir.setLockFactory(lf);
+      }
+      stores.put(dir, Thread.currentThread().getStackTrace());
+      return dir;
+    } catch (Exception e) {
+      throw new RuntimeException(e);
+    }
+  }
+  
+  /**
+   * Returns a new Directory instance, using the specified random
+   * with contents copied from the provided directory. See 
+   * {@link #newDirectory()} for more information.
+   */
+  public static MockDirectoryWrapper newDirectory(Random r, Directory d) throws IOException {
+    Directory impl = newDirectoryImpl(r, TEST_DIRECTORY);
+    for (String file : d.listAll()) {
+     d.copy(impl, file, file);
+    }
+    MockDirectoryWrapper dir = new MockDirectoryWrapper(r, impl);
+    stores.put(dir, Thread.currentThread().getStackTrace());
+    return dir;
+  }
+  
+  /** Returns a new field instance. 
+   * See {@link #newField(String, String, Field.Store, Field.Index, Field.TermVector)} for more information */
+  public static Field newField(String name, String value, Index index) {
+    return newField(random, name, value, index);
+  }
+  
+  /** Returns a new field instance. 
+   * See {@link #newField(String, String, Field.Store, Field.Index, Field.TermVector)} for more information */
+  public static Field newField(String name, String value, Store store, Index index) {
+    return newField(random, name, value, store, index);
+  }
+  
+  /**
+   * Returns a new Field instance. Use this when the test does not
+   * care about some specific field settings (most tests)
+   * <ul>
+   *  <li>If the store value is set to Store.NO, sometimes the field will be randomly stored.
+   *  <li>More term vector data than you ask for might be indexed, for example if you choose YES
+   *      it might index term vectors with offsets too.
+   * </ul>
+   */
+  public static Field newField(String name, String value, Store store, Index index, TermVector tv) {
+    return newField(random, name, value, store, index, tv);
+  }
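+  // Example usage (a sketch): doc.add(newField("body", text, Store.NO, Index.ANALYZED,
+  //     TermVector.NO)); // the field may still end up stored or with extra term vector data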
+  
+  /** Returns a new field instance, using the specified random. 
+   * See {@link #newField(String, String, Field.Store, Field.Index, Field.TermVector)} for more information */
+  public static Field newField(Random random, String name, String value, Index index) {
+    return newField(random, name, value, Store.NO, index);
+  }
+  
+  /** Returns a new field instance, using the specified random. 
+   * See {@link #newField(String, String, Field.Store, Field.Index, Field.TermVector)} for more information */
+  public static Field newField(Random random, String name, String value, Store store, Index index) {
+    return newField(random, name, value, store, index, TermVector.NO);
+  }
+  
+  /** Returns a new field instance, using the specified random. 
+   * See {@link #newField(String, String, Field.Store, Field.Index, Field.TermVector)} for more information */
+  public static Field newField(Random random, String name, String value, Store store, Index index, TermVector tv) {
+    if (usually(random)) {
+      // most of the time, don't modify the params
+      return new Field(name, value, store, index, tv);
+    }
+
+    if (!index.isIndexed())
+      return new Field(name, value, store, index, tv);
+    
+    if (!store.isStored() && random.nextBoolean())
+      store = Store.YES; // randomly store it
+    
+    tv = randomTVSetting(random, tv);
+    
+    return new Field(name, value, store, index, tv);
+  }
+  
+  static final TermVector tvSettings[] = { 
+    TermVector.NO, TermVector.YES, TermVector.WITH_OFFSETS, 
+    TermVector.WITH_POSITIONS, TermVector.WITH_POSITIONS_OFFSETS 
+  };
+  
+  private static TermVector randomTVSetting(Random random, TermVector minimum) {
+    switch(minimum) {
+      case NO: return tvSettings[_TestUtil.nextInt(random, 0, tvSettings.length-1)];
+      case YES: return tvSettings[_TestUtil.nextInt(random, 1, tvSettings.length-1)];
+      case WITH_OFFSETS: return random.nextBoolean() ? TermVector.WITH_OFFSETS 
+          : TermVector.WITH_POSITIONS_OFFSETS;
+      case WITH_POSITIONS: return random.nextBoolean() ? TermVector.WITH_POSITIONS 
+          : TermVector.WITH_POSITIONS_OFFSETS;
+      default: return TermVector.WITH_POSITIONS_OFFSETS;
+    }
+  }
+  
+  /** return a random Locale from the available locales on the system */
+  public static Locale randomLocale(Random random) {
+    Locale locales[] = Locale.getAvailableLocales();
+    return locales[random.nextInt(locales.length)];
+  }
+  
+  /** return a random TimeZone from the available timezones on the system */
+  public static TimeZone randomTimeZone(Random random) {
+    String tzIds[] = TimeZone.getAvailableIDs();
+    return TimeZone.getTimeZone(tzIds[random.nextInt(tzIds.length)]);
+  }
+  
+  /** return a Locale object equivalent to its programmatic name */
+  public static Locale localeForName(String localeName) {
+    String elements[] = localeName.split("\\_");
+    switch(elements.length) {
+      case 3: return new Locale(elements[0], elements[1], elements[2]);
+      case 2: return new Locale(elements[0], elements[1]);
+      case 1: return new Locale(elements[0]);
+      default: throw new IllegalArgumentException("Invalid Locale: " + localeName);
+    }
+  }
+
+  private static final String FS_DIRECTORIES[] = {
+    "SimpleFSDirectory",
+    "NIOFSDirectory",
+    "MMapDirectory"
+  };
+
+  private static final String CORE_DIRECTORIES[] = {
+    "RAMDirectory",
+    FS_DIRECTORIES[0], FS_DIRECTORIES[1], FS_DIRECTORIES[2]
+  };
+  
+  public static String randomDirectory(Random random) {
+    if (rarely(random)) {
+      return CORE_DIRECTORIES[random.nextInt(CORE_DIRECTORIES.length)];
+    } else {
+      return "RAMDirectory";
+    }
+  }
+
+  private static Directory newFSDirectoryImpl(
+      Class<? extends FSDirectory> clazz, File file)
+      throws IOException {
+    FSDirectory d = null;
+    try {
+      // Assuming every FSDirectory has a ctor(File), but not all may take a
+      // LockFactory too, so setting it afterwards.
+      Constructor<? extends FSDirectory> ctor = clazz.getConstructor(File.class);
+      d = ctor.newInstance(file);
+    } catch (Exception e) {
+      d = FSDirectory.open(file);
+    }
+    return d;
+  }
+  
+  /** Registers a temp file that will be deleted when tests are done. */
+  public static void registerTempFile(File tmpFile) {
+    tempDirs.put(tmpFile.getAbsoluteFile(), Thread.currentThread().getStackTrace());
+  }
+  
+  static Directory newDirectoryImpl(Random random, String clazzName) {
+    if (clazzName.equals("random"))
+      clazzName = randomDirectory(random);
+    if (clazzName.indexOf(".") == -1) // if not fully qualified, assume .store
+      clazzName = "org.apache.lucene.store." + clazzName;
+    try {
+      final Class<? extends Directory> clazz = Class.forName(clazzName).asSubclass(Directory.class);
+      // If it is a FSDirectory type, try its ctor(File)
+      if (FSDirectory.class.isAssignableFrom(clazz)) {
+        final File tmpFile = _TestUtil.createTempFile("test", "tmp", TEMP_DIR);
+        tmpFile.delete();
+        tmpFile.mkdir();
+        registerTempFile(tmpFile);
+        return newFSDirectoryImpl(clazz.asSubclass(FSDirectory.class), tmpFile);
+      }
+
+      // try empty ctor
+      return clazz.newInstance();
+    } catch (Exception e) {
+      throw new RuntimeException(e);
+    } 
+  }
+  
+  /** create a new searcher over the reader.
+   * This searcher might randomly use threads. */
+  public static IndexSearcher newSearcher(IndexReader r) throws IOException {
+    return newSearcher(r, true);
+  }
+  
+  /** create a new searcher over the reader.
+   * This searcher might randomly use threads.
+   * if <code>maybeWrap</code> is true, this searcher might wrap the reader
+   * with one that returns null for getSequentialSubReaders.
+   */
+  public static IndexSearcher newSearcher(IndexReader r, boolean maybeWrap) throws IOException {
+    if (random.nextBoolean()) {
+      if (maybeWrap && rarely()) {
+        r = new SlowMultiReaderWrapper(r);
+      }
+      return new AssertingIndexSearcher(r);
+    } else {
+      int threads = 0;
+      final ExecutorService ex = (random.nextBoolean()) ? null 
+          : Executors.newFixedThreadPool(threads = _TestUtil.nextInt(random, 1, 8), 
+                      new NamedThreadFactory("LuceneTestCase"));
+      if (ex != null && VERBOSE) {
+        System.out.println("NOTE: newSearcher using ExecutorService with " + threads + " threads");
+      }
+      return new AssertingIndexSearcher(r, ex) {
+        @Override
+        public void close() throws IOException {
+          super.close();
+          shutdownExecutorService(ex);
+        }
+      };
+    }
+  }
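+  // Tests should close() the returned searcher: when an ExecutorService was created above,
+  // the overridden close() also shuts it down; otherwise the leftover "LuceneTestCase" pool
+  // threads are flagged by threadCleanup().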
+  
+  static void shutdownExecutorService(ExecutorService ex) {
+    if (ex != null) {
+      ex.shutdown();
+      try {
+        ex.awaitTermination(1000, TimeUnit.MILLISECONDS);
+      } catch (InterruptedException e) {
+        e.printStackTrace();
+      }
+    }
+  }
+
+  public String getName() {
+    return this.name;
+  }
+  
+  /** Gets a resource from the classpath as {@link File}. This method should only be used
+   * if a real file is needed. To get a stream, code should prefer
+   * {@link Class#getResourceAsStream} using {@code this.getClass()}.
+   */
+  
+  protected File getDataFile(String name) throws IOException {
+    try {
+      return new File(this.getClass().getResource(name).toURI());
+    } catch (Exception e) {
+      throw new IOException("Cannot find resource: " + name);
+    }
+  }
+
+  // We get here from InterceptTestCaseEvents on the 'failed' event....
+  public static void reportPartialFailureInfo() {
+    System.err.println("NOTE: reproduce with (hopefully): ant test -Dtestcase=" + testClassesRun.get(testClassesRun.size()-1)
+        + " -Dtests.seed=" + new ThreeLongs(staticSeed, 0L, LuceneTestCaseRunner.runnerSeed)
+        + reproduceWithExtraParams());
+  }
+  
+  // We get here from InterceptTestCaseEvents on the 'failed' event....
+  public void reportAdditionalFailureInfo() {
+    System.err.println("NOTE: reproduce with: ant test -Dtestcase=" + getClass().getSimpleName() 
+        + " -Dtestmethod=" + getName() + " -Dtests.seed=" + new ThreeLongs(staticSeed, seed, LuceneTestCaseRunner.runnerSeed)
+        + reproduceWithExtraParams());
+  }
+  
+  // extra params that were overridden needed to reproduce the command
+  private static String reproduceWithExtraParams() {
+    StringBuilder sb = new StringBuilder();
+    if (!TEST_LOCALE.equals("random")) sb.append(" -Dtests.locale=").append(TEST_LOCALE);
+    if (!TEST_TIMEZONE.equals("random")) sb.append(" -Dtests.timezone=").append(TEST_TIMEZONE);
+    if (!TEST_DIRECTORY.equals("random")) sb.append(" -Dtests.directory=").append(TEST_DIRECTORY);
+    if (RANDOM_MULTIPLIER > 1) sb.append(" -Dtests.multiplier=").append(RANDOM_MULTIPLIER);
+    if (TEST_NIGHTLY) sb.append(" -Dtests.nightly=true");
+    return sb.toString();
+  }
+
+  // recorded seed: for beforeClass
+  private static long staticSeed;
+  // seed for individual test methods, changed in @before
+  private long seed;
+  
+  static final Random seedRand = new Random();
+  protected static final SmartRandom random = new SmartRandom(0);
+  
+  private String name = "<unknown>";
+  
+  /**
+   * Annotation for tests that should only be run during nightly builds.
+   */
+  @Documented
+  @Inherited
+  @Retention(RetentionPolicy.RUNTIME)
+  public @interface Nightly {}
+  
+  @Ignore("just a hack")
+  public final void alwaysIgnoredTestMethod() {}
+}
diff --git a/lucene/backwards/src/test-framework/org/apache/lucene/util/LuceneTestCaseRunner.java b/lucene/backwards/src/test-framework/org/apache/lucene/util/LuceneTestCaseRunner.java
new file mode 100644
index 0000000..5a9bcdd
--- /dev/null
+++ b/lucene/backwards/src/test-framework/org/apache/lucene/util/LuceneTestCaseRunner.java
@@ -0,0 +1,176 @@
+package org.apache.lucene.util;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Method;
+import java.lang.reflect.Modifier;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.List;
+import java.util.Random;
+
+import org.apache.lucene.util.LuceneTestCase.Nightly;
+import org.junit.Ignore;
+import org.junit.Test;
+import org.junit.runner.Description;
+import org.junit.runner.manipulation.Filter;
+import org.junit.runner.manipulation.NoTestsRemainException;
+import org.junit.runner.notification.Failure;
+import org.junit.runner.notification.RunListener;
+import org.junit.runner.notification.RunNotifier;
+import org.junit.runners.BlockJUnit4ClassRunner;
+import org.junit.runners.model.FrameworkMethod;
+import org.junit.runners.model.InitializationError;
+
+// please don't reorganize these into a wildcard!
+import static org.apache.lucene.util.LuceneTestCase.TEST_ITER;
+import static org.apache.lucene.util.LuceneTestCase.TEST_ITER_MIN;
+import static org.apache.lucene.util.LuceneTestCase.TEST_METHOD;
+import static org.apache.lucene.util.LuceneTestCase.TEST_SEED;
+import static org.apache.lucene.util.LuceneTestCase.TEST_NIGHTLY;
+import static org.apache.lucene.util.LuceneTestCase.VERBOSE;
+
+/** optionally filters the tests to be run by TEST_METHOD */
+public class LuceneTestCaseRunner extends BlockJUnit4ClassRunner {
+  private List<FrameworkMethod> testMethods;
+  static final long runnerSeed;
+  static {
+    runnerSeed = "random".equals(TEST_SEED) ? LuceneTestCase.seedRand.nextLong() : ThreeLongs.fromString(TEST_SEED).l3;
+  }
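+  // runnerSeed controls the per-class shuffle of test methods in computeTestMethods();
+  // with a fixed tests.seed it comes from the third long (l3), so method order is reproducible.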
+  
+  @Override
+  protected List<FrameworkMethod> computeTestMethods() {
+    if (testMethods != null)
+      return testMethods;
+    
+    Random r = new Random(runnerSeed);
+    
+    LuceneTestCase.testClassesRun.add(getTestClass().getJavaClass().getSimpleName());
+    testMethods = new ArrayList<FrameworkMethod>();
+    for (Method m : getTestClass().getJavaClass().getMethods()) {
+      // check if the current test's class has methods annotated with @Ignore
+      final Ignore ignored = m.getAnnotation(Ignore.class);
+      if (ignored != null && !m.getName().equals("alwaysIgnoredTestMethod")) {
+        System.err.println("NOTE: Ignoring test method '" + m.getName() + "': " + ignored.value());
+      }
+      // add methods starting with "test"
+      final int mod = m.getModifiers();
+      if (m.getAnnotation(Test.class) != null ||
+          (m.getName().startsWith("test") &&
+              !Modifier.isAbstract(mod) &&
+              m.getParameterTypes().length == 0 &&
+              m.getReturnType() == Void.TYPE))
+      {
+        if (Modifier.isStatic(mod))
+          throw new RuntimeException("Test methods must not be static.");
+        testMethods.add(new FrameworkMethod(m));
+      }
+    }
+    
+    if (testMethods.isEmpty()) {
+      throw new RuntimeException("No runnable methods!");
+    }
+    
+    if (TEST_NIGHTLY == false) {
+      if (getTestClass().getJavaClass().isAnnotationPresent(Nightly.class)) {
+        /* the test class is annotated with nightly, remove all methods */
+        String className = getTestClass().getJavaClass().getSimpleName();
+        System.err.println("NOTE: Ignoring nightly-only test class '" + className + "'");
+        testMethods.clear();
+      } else {
+        /* remove all nightly-only methods */
+        for (int i = 0; i < testMethods.size(); i++) {
+          final FrameworkMethod m = testMethods.get(i);
+          if (m.getAnnotation(Nightly.class) != null) {
+            System.err.println("NOTE: Ignoring nightly-only test method '" + m.getName() + "'");
+            testMethods.remove(i--);
+          }
+        }
+      }
+      /* dodge a possible "no-runnable methods" exception by adding a fake ignored test */
+      if (testMethods.isEmpty()) {
+        try {
+          testMethods.add(new FrameworkMethod(LuceneTestCase.class.getMethod("alwaysIgnoredTestMethod")));
+        } catch (Exception e) { throw new RuntimeException(e); }
+      }
+    }
+    // sort the test methods first before shuffling them, so that the shuffle is consistent
+    // across different implementations that might originally order the methods differently.
+    Collections.sort(testMethods, new Comparator<FrameworkMethod>() {
+      public int compare(FrameworkMethod f1, FrameworkMethod f2) {
+        return f1.getName().compareTo(f2.getName());
+      }
+    });
+    Collections.shuffle(testMethods, r);
+    return testMethods;
+  }
+  
+  @Override
+  protected void runChild(FrameworkMethod arg0, RunNotifier arg1) {
+    if (VERBOSE) {
+      System.out.println("\nNOTE: running test " + arg0.getName());
+    }
+    
+    // only print iteration info if the user requested more than one iteration
+    final boolean verbose = VERBOSE && TEST_ITER > 1;
+    
+    final int currentIter[] = new int[1];
+    arg1.addListener(new RunListener() {
+      @Override
+      public void testFailure(Failure failure) throws Exception {
+        if (verbose) {
+          System.out.println("\nNOTE: iteration " + currentIter[0] + " failed! ");
+        }
+      }
+    });
+    for (int i = 0; i < TEST_ITER; i++) {
+      currentIter[0] = i;
+      if (verbose) {
+        System.out.println("\nNOTE: running iter=" + (1+i) + " of " + TEST_ITER);
+      }
+      super.runChild(arg0, arg1);
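+      // once tests have failed, stop as soon as at least TEST_ITER_MIN iterations have completed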
+      if (LuceneTestCase.testsFailed) {
+        if (i >= TEST_ITER_MIN - 1) { // XXX is this still off-by-one?
+          break;
+        }
+      }
+    }
+  }
+  
+  public LuceneTestCaseRunner(Class<?> clazz) throws InitializationError {
+    super(clazz);
+    // evil: we cannot init our random here, because super() calls computeTestMethods!
+    Filter f = new Filter() {
+      
+      @Override
+      public String describe() { return "filters according to TEST_METHOD"; }
+      
+      @Override
+      public boolean shouldRun(Description d) {
+        return TEST_METHOD == null || d.getMethodName().equals(TEST_METHOD);
+      }
+    };
+    
+    try {
+      f.apply(this);
+    } catch (NoTestsRemainException e) {
+      throw new RuntimeException(e);
+    }
+  }
+}
diff --git a/lucene/backwards/src/test-framework/org/apache/lucene/util/SmartRandom.java b/lucene/backwards/src/test-framework/org/apache/lucene/util/SmartRandom.java
new file mode 100644
index 0000000..8e92ba2
--- /dev/null
+++ b/lucene/backwards/src/test-framework/org/apache/lucene/util/SmartRandom.java
@@ -0,0 +1,43 @@
+package org.apache.lucene.util;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Random;
+
+/**
+ * A Random that tracks whether it has been initialized properly,
+ * and warns if it hasn't.
+ */
+public class SmartRandom extends Random {
+  boolean initialized;
+  
+  SmartRandom(long seed) {
+    super(seed);
+  }
+  
+  @Override
+  protected int next(int bits) {
+    if (!initialized) {
+      System.err.println("!!! WARNING: test is using random from static initializer !!!");
+      Thread.dumpStack();
+      // I wish, but it causes JRE crashes
+      // throw new IllegalStateException("you cannot use this random from a static initializer in your test");
+    }
+    return super.next(bits);
+  }
+}
diff --git a/lucene/backwards/src/test-framework/org/apache/lucene/util/ThreeLongs.java b/lucene/backwards/src/test-framework/org/apache/lucene/util/ThreeLongs.java
new file mode 100644
index 0000000..8911341
--- /dev/null
+++ b/lucene/backwards/src/test-framework/org/apache/lucene/util/ThreeLongs.java
@@ -0,0 +1,46 @@
+package org.apache.lucene.util;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/** helper class for a random seed that is really 3 random seeds:
+ *  <ol>
+ *   <li>The test class's random seed: this is what the test sees in its beforeClass methods
+ *   <li>The test method's random seed: this is what the test method sees starting in its befores
+ *   <li>The test runner's random seed (controls the shuffling of test methods)
+ *  </ol>
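+ *  <p>For example, the seeds {@code (10, 11, 12)} serialize to the hex string {@code "a:b:c"}.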
+ */
+class ThreeLongs {
+  public final long l1, l2, l3;
+  
+  public ThreeLongs(long l1, long l2, long l3) {
+    this.l1 = l1;
+    this.l2 = l2;
+    this.l3 = l3;
+  }
+  
+  @Override
+  public String toString() {
+    return Long.toString(l1, 16) + ":" + Long.toString(l2, 16) + ":" + Long.toString(l3, 16);
+  }
+  
+  public static ThreeLongs fromString(String s) {
+    String parts[] = s.split(":");
+    assert parts.length == 3;
+    return new ThreeLongs(Long.parseLong(parts[0], 16), Long.parseLong(parts[1], 16), Long.parseLong(parts[2], 16));
+  }
+}
diff --git a/lucene/backwards/src/test-framework/org/apache/lucene/util/ThrottledIndexOutput.java b/lucene/backwards/src/test-framework/org/apache/lucene/util/ThrottledIndexOutput.java
new file mode 100644
index 0000000..fcded42
--- /dev/null
+++ b/lucene/backwards/src/test-framework/org/apache/lucene/util/ThrottledIndexOutput.java
@@ -0,0 +1,149 @@
+package org.apache.lucene.util;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import java.io.IOException;
+
+import org.apache.lucene.store.DataInput;
+import org.apache.lucene.store.IndexOutput;
+
+public class ThrottledIndexOutput extends IndexOutput {
+  public static final int DEFAULT_MIN_WRITTEN_BYTES = 1024;
+  private final int bytesPerSecond;
+  private IndexOutput delegate;
+  private long flushDelayMillis;
+  private long closeDelayMillis;
+  private long seekDelayMillis;
+  private long pendingBytes;
+  private long minBytesWritten;
+  private long timeElapsed;
+  private final byte[] bytes = new byte[1];
+
+  public ThrottledIndexOutput newFromDelegate(IndexOutput output) {
+    return new ThrottledIndexOutput(bytesPerSecond, flushDelayMillis,
+        closeDelayMillis, seekDelayMillis, minBytesWritten, output);
+  }
+
+  public ThrottledIndexOutput(int bytesPerSecond, long delayInMillis,
+      IndexOutput delegate) {
+    this(bytesPerSecond, delayInMillis, delayInMillis, delayInMillis,
+        DEFAULT_MIN_WRITTEN_BYTES, delegate);
+  }
+
+  public ThrottledIndexOutput(int bytesPerSecond, long delays,
+      int minBytesWritten, IndexOutput delegate) {
+    this(bytesPerSecond, delays, delays, delays, minBytesWritten, delegate);
+  }
+
+  public static final int mBitsToBytes(int mbits) {
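+    // 1 megabit = 1,000,000 bits = 125,000 bytes, so bytes/sec = mbits * 125,000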
+    return mbits * 125000;
+  }
+
+  public ThrottledIndexOutput(int bytesPerSecond, long flushDelayMillis,
+      long closeDelayMillis, long seekDelayMillis, long minBytesWritten,
+      IndexOutput delegate) {
+    assert bytesPerSecond > 0;
+    this.delegate = delegate;
+    this.bytesPerSecond = bytesPerSecond;
+    this.flushDelayMillis = flushDelayMillis;
+    this.closeDelayMillis = closeDelayMillis;
+    this.seekDelayMillis = seekDelayMillis;
+    this.minBytesWritten = minBytesWritten;
+  }
+
+  @Override
+  public void flush() throws IOException {
+    sleep(flushDelayMillis);
+    delegate.flush();
+  }
+
+  @Override
+  public void close() throws IOException {
+    try {
+      sleep(closeDelayMillis + getDelay(true));
+    } finally {
+      delegate.close();
+    }
+  }
+
+  @Override
+  public long getFilePointer() {
+    return delegate.getFilePointer();
+  }
+
+  @Override
+  public void seek(long pos) throws IOException {
+    sleep(seekDelayMillis);
+    delegate.seek(pos);
+  }
+
+  @Override
+  public long length() throws IOException {
+    return delegate.length();
+  }
+
+  @Override
+  public void writeByte(byte b) throws IOException {
+    bytes[0] = b;
+    writeBytes(bytes, 0, 1);
+  }
+
+  @Override
+  public void writeBytes(byte[] b, int offset, int length) throws IOException {
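+    // time the delegate write and accumulate pending bytes, then sleep per getDelay(false) to enforce the rate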
+    final long before = System.nanoTime();
+    delegate.writeBytes(b, offset, length);
+    timeElapsed += System.nanoTime() - before;
+    pendingBytes += length;
+    sleep(getDelay(false));
+
+  }
+
+  protected long getDelay(boolean closing) {
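+    // once enough bytes are pending (or the output is closing), estimate the achieved write rate;
+    // if it exceeds bytesPerSecond, return how many millis to sleep before the next write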
+    if (pendingBytes > 0 && (closing || pendingBytes > minBytesWritten)) {
+      long actualBps = (timeElapsed / pendingBytes) * 1000000000l; // nano to sec
+      if (actualBps > bytesPerSecond) {
+        long expected = (pendingBytes * 1000l / bytesPerSecond) ;
+        final long delay = expected - (timeElapsed / 1000000l) ;
+        pendingBytes = 0;
+        timeElapsed = 0;
+        return delay;
+      }
+    }
+    return 0;
+
+  }
+
+  private static final void sleep(long ms) {
+    if (ms <= 0)
+      return;
+    try {
+      Thread.sleep(ms);
+    } catch (InterruptedException e) {
+      throw new ThreadInterruptedException(e);
+    }
+  }
+  
+  @Override
+  public void setLength(long length) throws IOException {
+    delegate.setLength(length);
+  }
+
+  @Override
+  public void copyBytes(DataInput input, long numBytes) throws IOException {
+    delegate.copyBytes(input, numBytes);
+  }
+}
diff --git a/lucene/backwards/src/test-framework/org/apache/lucene/util/_TestIgnoredException.java b/lucene/backwards/src/test-framework/org/apache/lucene/util/_TestIgnoredException.java
new file mode 100644
index 0000000..3664cb0
--- /dev/null
+++ b/lucene/backwards/src/test-framework/org/apache/lucene/util/_TestIgnoredException.java
@@ -0,0 +1,51 @@
+package org.apache.lucene.util;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.PrintStream;
+
+/** Replacement for the JUnit Assume class, so we can add a message with an explanation */
+final class _TestIgnoredException extends RuntimeException {
+  
+  _TestIgnoredException(String msg) {
+    super(msg);
+  }
+  
+  _TestIgnoredException(String msg, Throwable t) {
+    super(msg, t);
+  }
+  
+  @Override
+  public String getMessage() {
+    StringBuilder sb = new StringBuilder(super.getMessage());
+    if (getCause() != null)
+      sb.append(" - ").append(getCause());
+    return sb.toString();
+  }
+  
+  // only this one is called by our code, exception is not used outside this class:
+  @Override
+  public void printStackTrace(PrintStream s) {
+    if (getCause() != null) {
+      s.println(super.toString() + " - Caused by:");
+      getCause().printStackTrace(s);
+    } else {
+      super.printStackTrace(s);
+    }
+  }
+}
diff --git a/lucene/backwards/src/test-framework/org/apache/lucene/util/_TestUtil.java b/lucene/backwards/src/test-framework/org/apache/lucene/util/_TestUtil.java
new file mode 100644
index 0000000..c657dc0
--- /dev/null
+++ b/lucene/backwards/src/test-framework/org/apache/lucene/util/_TestUtil.java
@@ -0,0 +1,514 @@
+package org.apache.lucene.util;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.BufferedOutputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.io.PrintStream;
+import java.lang.reflect.Method;
+import java.util.Enumeration;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Random;
+import java.util.zip.ZipEntry;
+import java.util.zip.ZipFile;
+
+import org.junit.Assert;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.Fieldable;
+import org.apache.lucene.index.CheckIndex;
+import org.apache.lucene.index.ConcurrentMergeScheduler;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.LogMergePolicy;
+import org.apache.lucene.index.MergePolicy;
+import org.apache.lucene.index.MergeScheduler;
+import org.apache.lucene.index.TieredMergePolicy;
+import org.apache.lucene.search.FieldDoc;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.store.Directory;
+
+public class _TestUtil {
+
+  /** Returns temp dir, based on String arg in its name;
+   *  does not create the directory. */
+  public static File getTempDir(String desc) {
+    try {
+      File f = createTempFile(desc, "tmp", LuceneTestCase.TEMP_DIR);
+      f.delete();
+      LuceneTestCase.registerTempFile(f);
+      return f;
+    } catch (IOException e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  /**
+   * Deletes a directory and everything underneath it.
+   */
+  public static void rmDir(File dir) throws IOException {
+    if (dir.exists()) {
+      for (File f : dir.listFiles()) {
+        if (f.isDirectory()) {
+          rmDir(f);
+        } else {
+          if (!f.delete()) {
+            throw new IOException("could not delete " + f);
+          }
+        }
+      }
+      if (!dir.delete()) {
+        throw new IOException("could not delete " + dir);
+      }
+    }
+  }
+
+  /** 
+   * Convenience method: unzips the given zip file under destDir, removing destDir first.
+   */
+  public static void unzip(File zipName, File destDir) throws IOException {
+    
+    ZipFile zipFile = new ZipFile(zipName);
+    
+    Enumeration<? extends ZipEntry> entries = zipFile.entries();
+    
+    rmDir(destDir);
+    
+    destDir.mkdir();
+    LuceneTestCase.registerTempFile(destDir);
+    
+    while (entries.hasMoreElements()) {
+      ZipEntry entry = entries.nextElement();
+      
+      InputStream in = zipFile.getInputStream(entry);
+      File targetFile = new File(destDir, entry.getName());
+      if (entry.isDirectory()) {
+        // allow unzipping with directory structure
+        targetFile.mkdirs();
+      } else {
+        if (targetFile.getParentFile()!=null) {
+          // be on the safe side: do not rely on directories always being extracted
+          // before their children (this seems sensible, but is it guaranteed?)
+          targetFile.getParentFile().mkdirs();   
+        }
+        OutputStream out = new BufferedOutputStream(new FileOutputStream(targetFile));
+        
+        byte[] buffer = new byte[8192];
+        int len;
+        while((len = in.read(buffer)) >= 0) {
+          out.write(buffer, 0, len);
+        }
+        
+        in.close();
+        out.close();
+      }
+    }
+    
+    zipFile.close();
+  }
+  
+  public static void syncConcurrentMerges(IndexWriter writer) {
+    syncConcurrentMerges(writer.getConfig().getMergeScheduler());
+  }
+
+  public static void syncConcurrentMerges(MergeScheduler ms) {
+    if (ms instanceof ConcurrentMergeScheduler)
+      ((ConcurrentMergeScheduler) ms).sync();
+  }
+
+  /** Runs the CheckIndex tool on the given directory.  If any
+   *  issues are hit, a RuntimeException is thrown; otherwise,
+   *  the index status is returned. */
+  public static CheckIndex.Status checkIndex(Directory dir) throws IOException {
+    ByteArrayOutputStream bos = new ByteArrayOutputStream(1024);
+
+    CheckIndex checker = new CheckIndex(dir);
+    checker.setInfoStream(new PrintStream(bos));
+    CheckIndex.Status indexStatus = checker.checkIndex();
+    if (indexStatus == null || indexStatus.clean == false) {
+      System.out.println("CheckIndex failed");
+      System.out.println(bos.toString());
+      throw new RuntimeException("CheckIndex failed");
+    } else {
+      return indexStatus;
+    }
+  }
+
+  /** Use only for testing.
+   *  @deprecated -- in 3.0 we can use Arrays.toString
+   *  instead */
+  @Deprecated
+  public static String arrayToString(int[] array) {
+    StringBuilder buf = new StringBuilder();
+    buf.append("[");
+    for(int i=0;i<array.length;i++) {
+      if (i > 0) {
+        buf.append(" ");
+      }
+      buf.append(array[i]);
+    }
+    buf.append("]");
+    return buf.toString();
+  }
+
+  /** Use only for testing.
+   *  @deprecated -- in 3.0 we can use Arrays.toString
+   *  instead */
+  @Deprecated
+  public static String arrayToString(Object[] array) {
+    StringBuilder buf = new StringBuilder();
+    buf.append("[");
+    for(int i=0;i<array.length;i++) {
+      if (i > 0) {
+        buf.append(" ");
+      }
+      buf.append(array[i]);
+    }
+    buf.append("]");
+    return buf.toString();
+  }
+
+  public static String randomSimpleString(Random r) {
+    final int end = r.nextInt(10);
+    if (end == 0) {
+      // allow 0 length
+      return "";
+    }
+    final char[] buffer = new char[end];
+    for (int i = 0; i < end; i++) {
+      buffer[i] = (char) _TestUtil.nextInt(r, 97, 102);
+    }
+    return new String(buffer, 0, end);
+  }
+
+  /** Returns random string, including full unicode range. */
+  public static String randomUnicodeString(Random r) {
+    return randomUnicodeString(r, 20);
+  }
+
+  /**
+   * Returns a random string, including the full unicode range, with length less than maxLength.
+   */
+  public static String randomUnicodeString(Random r, int maxLength) {
+    final int end = r.nextInt(maxLength);
+    if (end == 0) {
+      // allow 0 length
+      return "";
+    }
+    final char[] buffer = new char[end];
+    randomFixedLengthUnicodeString(r, buffer, 0, buffer.length);
+    return new String(buffer, 0, end);
+  }
+
+  /**
+   * Fills the provided char[] range with a valid random unicode
+   * code unit sequence.
+   */
+  public static void randomFixedLengthUnicodeString(Random random, char[] chars, int offset, int length) {
+    int i = offset;
+    final int end = offset + length;
+    while(i < end) {
+      final int t = random.nextInt(5);
+      if (0 == t && i < length - 1) {
+        // Make a surrogate pair
+        // High surrogate
+        chars[i++] = (char) nextInt(random, 0xd800, 0xdbff);
+        // Low surrogate
+        chars[i++] = (char) nextInt(random, 0xdc00, 0xdfff);
+      } else if (t <= 1) {
+        chars[i++] = (char) random.nextInt(0x80);
+      } else if (2 == t) {
+        chars[i++] = (char) nextInt(random, 0x80, 0x7ff);
+      } else if (3 == t) {
+        chars[i++] = (char) nextInt(random, 0x800, 0xd7ff);
+      } else if (4 == t) {
+        chars[i++] = (char) nextInt(random, 0xe000, 0xfffe);
+      }
+    }
+  }
+
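+  /** Start code points of Unicode blocks; each entry pairs with the same index in blockEnds and is
+   *  used by randomRealisticUnicodeString to keep all code points of a string within one block. */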
+  private static final int[] blockStarts = {
+    0x0000, 0x0080, 0x0100, 0x0180, 0x0250, 0x02B0, 0x0300, 0x0370, 0x0400, 
+    0x0500, 0x0530, 0x0590, 0x0600, 0x0700, 0x0750, 0x0780, 0x07C0, 0x0800, 
+    0x0900, 0x0980, 0x0A00, 0x0A80, 0x0B00, 0x0B80, 0x0C00, 0x0C80, 0x0D00, 
+    0x0D80, 0x0E00, 0x0E80, 0x0F00, 0x1000, 0x10A0, 0x1100, 0x1200, 0x1380, 
+    0x13A0, 0x1400, 0x1680, 0x16A0, 0x1700, 0x1720, 0x1740, 0x1760, 0x1780, 
+    0x1800, 0x18B0, 0x1900, 0x1950, 0x1980, 0x19E0, 0x1A00, 0x1A20, 0x1B00, 
+    0x1B80, 0x1C00, 0x1C50, 0x1CD0, 0x1D00, 0x1D80, 0x1DC0, 0x1E00, 0x1F00, 
+    0x2000, 0x2070, 0x20A0, 0x20D0, 0x2100, 0x2150, 0x2190, 0x2200, 0x2300, 
+    0x2400, 0x2440, 0x2460, 0x2500, 0x2580, 0x25A0, 0x2600, 0x2700, 0x27C0, 
+    0x27F0, 0x2800, 0x2900, 0x2980, 0x2A00, 0x2B00, 0x2C00, 0x2C60, 0x2C80, 
+    0x2D00, 0x2D30, 0x2D80, 0x2DE0, 0x2E00, 0x2E80, 0x2F00, 0x2FF0, 0x3000, 
+    0x3040, 0x30A0, 0x3100, 0x3130, 0x3190, 0x31A0, 0x31C0, 0x31F0, 0x3200, 
+    0x3300, 0x3400, 0x4DC0, 0x4E00, 0xA000, 0xA490, 0xA4D0, 0xA500, 0xA640, 
+    0xA6A0, 0xA700, 0xA720, 0xA800, 0xA830, 0xA840, 0xA880, 0xA8E0, 0xA900, 
+    0xA930, 0xA960, 0xA980, 0xAA00, 0xAA60, 0xAA80, 0xABC0, 0xAC00, 0xD7B0, 
+    0xE000, 0xF900, 0xFB00, 0xFB50, 0xFE00, 0xFE10, 
+    0xFE20, 0xFE30, 0xFE50, 0xFE70, 0xFF00, 0xFFF0, 
+    0x10000, 0x10080, 0x10100, 0x10140, 0x10190, 0x101D0, 0x10280, 0x102A0, 
+    0x10300, 0x10330, 0x10380, 0x103A0, 0x10400, 0x10450, 0x10480, 0x10800, 
+    0x10840, 0x10900, 0x10920, 0x10A00, 0x10A60, 0x10B00, 0x10B40, 0x10B60, 
+    0x10C00, 0x10E60, 0x11080, 0x12000, 0x12400, 0x13000, 0x1D000, 0x1D100, 
+    0x1D200, 0x1D300, 0x1D360, 0x1D400, 0x1F000, 0x1F030, 0x1F100, 0x1F200, 
+    0x20000, 0x2A700, 0x2F800, 0xE0000, 0xE0100, 0xF0000, 0x100000
+  };
+  
+  private static final int[] blockEnds = {
+    0x007F, 0x00FF, 0x017F, 0x024F, 0x02AF, 0x02FF, 0x036F, 0x03FF, 0x04FF, 
+    0x052F, 0x058F, 0x05FF, 0x06FF, 0x074F, 0x077F, 0x07BF, 0x07FF, 0x083F, 
+    0x097F, 0x09FF, 0x0A7F, 0x0AFF, 0x0B7F, 0x0BFF, 0x0C7F, 0x0CFF, 0x0D7F, 
+    0x0DFF, 0x0E7F, 0x0EFF, 0x0FFF, 0x109F, 0x10FF, 0x11FF, 0x137F, 0x139F, 
+    0x13FF, 0x167F, 0x169F, 0x16FF, 0x171F, 0x173F, 0x175F, 0x177F, 0x17FF, 
+    0x18AF, 0x18FF, 0x194F, 0x197F, 0x19DF, 0x19FF, 0x1A1F, 0x1AAF, 0x1B7F, 
+    0x1BBF, 0x1C4F, 0x1C7F, 0x1CFF, 0x1D7F, 0x1DBF, 0x1DFF, 0x1EFF, 0x1FFF, 
+    0x206F, 0x209F, 0x20CF, 0x20FF, 0x214F, 0x218F, 0x21FF, 0x22FF, 0x23FF, 
+    0x243F, 0x245F, 0x24FF, 0x257F, 0x259F, 0x25FF, 0x26FF, 0x27BF, 0x27EF, 
+    0x27FF, 0x28FF, 0x297F, 0x29FF, 0x2AFF, 0x2BFF, 0x2C5F, 0x2C7F, 0x2CFF, 
+    0x2D2F, 0x2D7F, 0x2DDF, 0x2DFF, 0x2E7F, 0x2EFF, 0x2FDF, 0x2FFF, 0x303F, 
+    0x309F, 0x30FF, 0x312F, 0x318F, 0x319F, 0x31BF, 0x31EF, 0x31FF, 0x32FF, 
+    0x33FF, 0x4DBF, 0x4DFF, 0x9FFF, 0xA48F, 0xA4CF, 0xA4FF, 0xA63F, 0xA69F, 
+    0xA6FF, 0xA71F, 0xA7FF, 0xA82F, 0xA83F, 0xA87F, 0xA8DF, 0xA8FF, 0xA92F, 
+    0xA95F, 0xA97F, 0xA9DF, 0xAA5F, 0xAA7F, 0xAADF, 0xABFF, 0xD7AF, 0xD7FF, 
+    0xF8FF, 0xFAFF, 0xFB4F, 0xFDFF, 0xFE0F, 0xFE1F, 
+    0xFE2F, 0xFE4F, 0xFE6F, 0xFEFF, 0xFFEF, 0xFFFE, /* avoid 0xFFFF on 3.x */
+    0x1007F, 0x100FF, 0x1013F, 0x1018F, 0x101CF, 0x101FF, 0x1029F, 0x102DF, 
+    0x1032F, 0x1034F, 0x1039F, 0x103DF, 0x1044F, 0x1047F, 0x104AF, 0x1083F, 
+    0x1085F, 0x1091F, 0x1093F, 0x10A5F, 0x10A7F, 0x10B3F, 0x10B5F, 0x10B7F, 
+    0x10C4F, 0x10E7F, 0x110CF, 0x123FF, 0x1247F, 0x1342F, 0x1D0FF, 0x1D1FF, 
+    0x1D24F, 0x1D35F, 0x1D37F, 0x1D7FF, 0x1F02F, 0x1F09F, 0x1F1FF, 0x1F2FF, 
+    0x2A6DF, 0x2B73F, 0x2FA1F, 0xE007F, 0xE01EF, 0xFFFFF, 0x10FFFF
+  };
+  
+  /** Returns random string of length between 0 and 20 codepoints, all codepoints within the same unicode block. */
+  public static String randomRealisticUnicodeString(Random r) {
+    return randomRealisticUnicodeString(r, 20);
+  }
+  
+  /** Returns random string of length up to maxLength codepoints, all codepoints within the same unicode block. */
+  public static String randomRealisticUnicodeString(Random r, int maxLength) {
+    return randomRealisticUnicodeString(r, 0, maxLength);
+  }
+
+  /** Returns random string of length between min and max codepoints, all codepoints within the same unicode block. */
+  public static String randomRealisticUnicodeString(Random r, int minLength, int maxLength) {
+    final int end = minLength + r.nextInt(maxLength);
+    final int block = r.nextInt(blockStarts.length);
+    StringBuilder sb = new StringBuilder();
+    for (int i = 0; i < end; i++)
+      sb.appendCodePoint(nextInt(r, blockStarts[block], blockEnds[block]));
+    return sb.toString();
+  }
+
+  /** Returns random string, with a given UTF-8 byte length */
+  public static String randomFixedByteLengthUnicodeString(Random r, int length) {
+    
+    final char[] buffer = new char[length*3];
+    int bytes = length;
+    int i = 0;
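+    // pick each char so its UTF-8 size fits the remaining byte budget:
+    // t=0 -> 1-byte ASCII, t=1 -> 2 bytes, t=2 or 3 -> 3 bytes, t=4 -> a 4-byte surrogate pair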
+    for (; i < buffer.length && bytes != 0; i++) {
+      int t;
+      if (bytes >= 4) {
+        t = r.nextInt(5);
+      } else if (bytes >= 3) {
+        t = r.nextInt(4);
+      } else if (bytes >= 2) {
+        t = r.nextInt(2);
+      } else {
+        t = 0;
+      }
+      if (t == 0) {
+        buffer[i] = (char) r.nextInt(0x80);
+        bytes--;
+      } else if (1 == t) {
+        buffer[i] = (char) nextInt(r, 0x80, 0x7ff);
+        bytes -= 2;
+      } else if (2 == t) {
+        buffer[i] = (char) nextInt(r, 0x800, 0xd7ff);
+        bytes -= 3;
+      } else if (3 == t) {
+        buffer[i] = (char) nextInt(r, 0xe000, 0xfffe);
+        bytes -= 3;
+      } else if (4 == t) {
+        // Make a surrogate pair
+        // High surrogate
+        buffer[i++] = (char) nextInt(r, 0xd800, 0xdbff);
+        // Low surrogate
+        buffer[i] = (char) nextInt(r, 0xdc00, 0xdfff);
+        bytes -= 4;
+      }
+
+    }
+    return new String(buffer, 0, i);
+  }
+
+  /** start and end are BOTH inclusive */
+  public static int nextInt(Random r, int start, int end) {
+    return start + r.nextInt(end-start+1);
+  }
+
+  public static boolean anyFilesExceptWriteLock(Directory dir) throws IOException {
+    String[] files = dir.listAll();
+    if (files.length > 1 || (files.length == 1 && !files[0].equals("write.lock"))) {
+      return true;
+    } else {
+      return false;
+    }
+  }
+
+  /** just tries to configure things to keep the open file
+   * count lowish */
+  public static void reduceOpenFiles(IndexWriter w) {
+    // keep number of open files lowish
+    MergePolicy mp = w.getConfig().getMergePolicy();
+    if (mp instanceof LogMergePolicy) {
+      LogMergePolicy lmp = (LogMergePolicy) mp;
+      lmp.setMergeFactor(Math.min(5, lmp.getMergeFactor()));
+    } else if (mp instanceof TieredMergePolicy) {
+      TieredMergePolicy tmp = (TieredMergePolicy) mp;
+      tmp.setMaxMergeAtOnce(Math.min(5, tmp.getMaxMergeAtOnce()));
+      tmp.setSegmentsPerTier(Math.min(5, tmp.getSegmentsPerTier()));
+    }
+
+    MergeScheduler ms = w.getConfig().getMergeScheduler();
+    if (ms instanceof ConcurrentMergeScheduler) {
+      ((ConcurrentMergeScheduler) ms).setMaxThreadCount(2);
+      ((ConcurrentMergeScheduler) ms).setMaxMergeCount(3);
+    }
+  }
+
+  /** Checks some basic behaviour of an AttributeImpl
+   * @param reflectedValues a map from "AttributeClassName#key" strings to the expected reflected values
+   */
+  public static <T> void assertAttributeReflection(final AttributeImpl att, Map<String,T> reflectedValues) {
+    final Map<String,Object> map = new HashMap<String,Object>();
+    att.reflectWith(new AttributeReflector() {
+      public void reflect(Class<? extends Attribute> attClass, String key, Object value) {
+        map.put(attClass.getName() + '#' + key, value);
+      }
+    });
+    Assert.assertEquals("Reflection does not produce same map", reflectedValues, map);
+  }
+
+  public static void keepFullyDeletedSegments(IndexWriter w) {
+    try {
+      // Carefully invoke what is a package-private (test
+      // only, internal) method on IndexWriter:
+      Method m = IndexWriter.class.getDeclaredMethod("keepFullyDeletedSegments");
+      m.setAccessible(true);
+      m.invoke(w);
+    } catch (Exception e) {
+      // Should not happen?
+      throw new RuntimeException(e);
+    }
+  }
+  
+  /** 
+   * Insecure, fast version of File.createTempFile;
+   * uses Random instead of SecureRandom.
+   */
+  public static File createTempFile(String prefix, String suffix, File directory)
+      throws IOException {
+    // Force a prefix null check first
+    if (prefix.length() < 3) {
+      throw new IllegalArgumentException("prefix must be at least 3 characters");
+    }
+    String newSuffix = suffix == null ? ".tmp" : suffix;
+    File result;
+    do {
+      result = genTempFile(prefix, newSuffix, directory);
+    } while (!result.createNewFile());
+    return result;
+  }
+
+  /* Temp file counter */
+  private static int counter = 0;
+
+  /* identifier to distinguish different VM processes */
+  private static int counterBase = 0;
+
+  private static class TempFileLocker {};
+  private static TempFileLocker tempFileLocker = new TempFileLocker();
+
+  private static File genTempFile(String prefix, String suffix, File directory) {
+    int identify = 0;
+
+    synchronized (tempFileLocker) {
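+      // lazily seed the shared counter with a value >= 0x2710 (10000) derived from a random int,
+      // so temp file names from different VM processes are unlikely to collide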
+      if (counter == 0) {
+        int newInt = new Random().nextInt();
+        counter = ((newInt / 65535) & 0xFFFF) + 0x2710;
+        counterBase = counter;
+      }
+      identify = counter++;
+    }
+
+    StringBuilder newName = new StringBuilder();
+    newName.append(prefix);
+    newName.append(counterBase);
+    newName.append(identify);
+    newName.append(suffix);
+    return new File(directory, newName.toString());
+  }
+
+  public static void assertEquals(TopDocs expected, TopDocs actual) {
+    Assert.assertEquals("wrong total hits", expected.totalHits, actual.totalHits);
+    Assert.assertEquals("wrong maxScore", expected.getMaxScore(), actual.getMaxScore(), 0.0);
+    Assert.assertEquals("wrong hit count", expected.scoreDocs.length, actual.scoreDocs.length);
+    for(int hitIDX=0;hitIDX<expected.scoreDocs.length;hitIDX++) {
+      final ScoreDoc expectedSD = expected.scoreDocs[hitIDX];
+      final ScoreDoc actualSD = actual.scoreDocs[hitIDX];
+      Assert.assertEquals("wrong hit docID", expectedSD.doc, actualSD.doc);
+      Assert.assertEquals("wrong hit score", expectedSD.score, actualSD.score, 0.0);
+      if (expectedSD instanceof FieldDoc) {
+        Assert.assertTrue(actualSD instanceof FieldDoc);
+        Assert.assertEquals("wrong sort field values",
+                            ((FieldDoc) expectedSD).fields,
+                            ((FieldDoc) actualSD).fields);
+      } else {
+        Assert.assertFalse(actualSD instanceof FieldDoc);
+      }
+    }
+  }
+
+  // NOTE: this is likely buggy, and cannot clone fields
+  // with tokenStreamValues, etc.  Use at your own risk!!
+
+  // TODO: is there a pre-existing way to do this!!!
+  public static Document cloneDocument(Document doc1) {
+    final Document doc2 = new Document();
+    for(Fieldable f : doc1.getFields()) {
+      Field field1 = (Field) f;
+      
+      Field field2 = new Field(field1.name(),
+                               field1.stringValue(),
+                               field1.isStored() ? Field.Store.YES : Field.Store.NO,
+                               field1.isIndexed() ? (field1.isTokenized() ? Field.Index.ANALYZED : Field.Index.NOT_ANALYZED) : Field.Index.NO);
+      field2.setOmitNorms(field1.getOmitNorms());
+      field2.setIndexOptions(field1.getIndexOptions());
+      doc2.add(field2);
+    }
+
+    return doc2;
+  }
+}
diff --git a/lucene/backwards/src/test-framework/org/apache/lucene/util/europarl.lines.txt.gz b/lucene/backwards/src/test-framework/org/apache/lucene/util/europarl.lines.txt.gz
new file mode 100644
index 0000000..e0366f1
--- /dev/null
+++ b/lucene/backwards/src/test-framework/org/apache/lucene/util/europarl.lines.txt.gz
Binary files differ
diff --git a/lucene/backwards/src/test-framework/overview.html b/lucene/backwards/src/test-framework/overview.html
new file mode 100644
index 0000000..608eb07
--- /dev/null
+++ b/lucene/backwards/src/test-framework/overview.html
@@ -0,0 +1,28 @@
+<html>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<head>
+   <title>Apache Lucene Test Framework API</title>
+</head>
+<body>
+<p>
+  The Lucene Test Framework is used by Lucene as the basis for its tests.  
+  The framework can also be used for testing third-party code that uses
+  the Lucene API. 
+</p>
+</body>
+</html>
diff --git a/lucene/backwards/src/test/org/apache/lucene/TestAssertions.java b/lucene/backwards/src/test/org/apache/lucene/TestAssertions.java
new file mode 100644
index 0000000..ce51fd3
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/TestAssertions.java
@@ -0,0 +1,108 @@
+package org.apache.lucene;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.Reader;
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.TokenStream;
+
+public class TestAssertions extends LuceneTestCase {
+
+  public void testBasics() {
+    try {
+      assert Boolean.FALSE.booleanValue();
+      fail("assertions are not enabled!");
+    } catch (AssertionError e) {
+      assert Boolean.TRUE.booleanValue();
+    }
+  }
+  
+  static class TestAnalyzer1 extends Analyzer {
+    @Override
+    public final TokenStream tokenStream(String s, Reader r) { return null; }
+    @Override
+    public final TokenStream reusableTokenStream(String s, Reader r) { return null; }
+  }
+
+  static final class TestAnalyzer2 extends Analyzer {
+    @Override
+    public TokenStream tokenStream(String s, Reader r) { return null; }
+    @Override
+    public TokenStream reusableTokenStream(String s, Reader r) { return null; }
+  }
+
+  static class TestAnalyzer3 extends Analyzer {
+    @Override
+    public TokenStream tokenStream(String s, Reader r) { return null; }
+    @Override
+    public TokenStream reusableTokenStream(String s, Reader r) { return null; }
+  }
+
+  static class TestAnalyzer4 extends Analyzer {
+    @Override
+    public final TokenStream tokenStream(String s, Reader r) { return null; }
+    @Override
+    public TokenStream reusableTokenStream(String s, Reader r) { return null; }
+  }
+
+  static class TestTokenStream1 extends TokenStream {
+    @Override
+    public final boolean incrementToken() { return false; }
+  }
+
+  static final class TestTokenStream2 extends TokenStream {
+    @Override
+    public boolean incrementToken() { return false; }
+  }
+
+  static class TestTokenStream3 extends TokenStream {
+    @Override
+    public boolean incrementToken() { return false; }
+  }
+
+  public void testTokenStreams() {
+    new TestAnalyzer1();
+    
+    new TestAnalyzer2();
+    
+    try {
+      new TestAnalyzer3();
+      fail("TestAnalyzer3 should fail assertion");
+    } catch (AssertionError e) {
+    }
+    
+    try {
+      new TestAnalyzer4();
+      fail("TestAnalyzer4 should fail assertion");
+    } catch (AssertionError e) {
+    }
+    
+    new TestTokenStream1();
+    
+    new TestTokenStream2();
+    
+    try {
+      new TestTokenStream3();
+      fail("TestTokenStream3 should fail assertion");
+    } catch (AssertionError e) {
+    }
+  }
+
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/TestDemo.java b/lucene/backwards/src/test/org/apache/lucene/TestDemo.java
new file mode 100644
index 0000000..3414997
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/TestDemo.java
@@ -0,0 +1,79 @@
+package org.apache.lucene;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.queryParser.ParseException;
+import org.apache.lucene.queryParser.QueryParser;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+
+/**
+ * A very simple demo used in the API documentation (src/java/overview.html).
+ *
+ * Please try to keep src/java/overview.html up-to-date when making changes
+ * to this class.
+ */
+public class TestDemo extends LuceneTestCase {
+
+  public void testDemo() throws IOException, ParseException {
+    Analyzer analyzer = new MockAnalyzer(random);
+
+    // Store the index in memory:
+    Directory directory = newDirectory();
+    // To store an index on disk, use this instead:
+    //Directory directory = FSDirectory.open("/tmp/testindex");
+    RandomIndexWriter iwriter = new RandomIndexWriter(random, directory, analyzer);
+    iwriter.w.setInfoStream(VERBOSE ? System.out : null);
+    Document doc = new Document();
+    String text = "This is the text to be indexed.";
+    doc.add(newField("fieldname", text, Field.Store.YES,
+        Field.Index.ANALYZED));
+    iwriter.addDocument(doc);
+    iwriter.close();
+    
+    // Now search the index:
+    IndexSearcher isearcher = new IndexSearcher(directory, true); // read-only=true
+    // Parse a simple query that searches for "text":
+    QueryParser parser = new QueryParser(TEST_VERSION_CURRENT, "fieldname", analyzer);
+    Query query = parser.parse("text");
+    TopDocs hits = isearcher.search(query, null, 1);
+    assertEquals(1, hits.totalHits);
+    // Iterate through the results:
+    for (int i = 0; i < hits.scoreDocs.length; i++) {
+      Document hitDoc = isearcher.doc(hits.scoreDocs[i].doc);
+      assertEquals("This is the text to be indexed.", hitDoc.get("fieldname"));
+    }
+
+    // Test simple phrase query
+    query = parser.parse("\"to be\"");
+    assertEquals(1, isearcher.search(query, null, 1).totalHits);
+
+    isearcher.close();
+    directory.close();
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/TestMergeSchedulerExternal.java b/lucene/backwards/src/test/org/apache/lucene/TestMergeSchedulerExternal.java
new file mode 100644
index 0000000..e477ae9
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/TestMergeSchedulerExternal.java
@@ -0,0 +1,149 @@
+package org.apache.lucene;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import java.io.IOException;
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.MockDirectoryWrapper;
+import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.index.CorruptIndexException;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.LogMergePolicy;
+import org.apache.lucene.index.MergePolicy;
+import org.apache.lucene.index.ConcurrentMergeScheduler;
+import org.apache.lucene.index.MergeScheduler;
+import org.apache.lucene.index.MergePolicy.OneMerge;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+
+/**
+ * Holds test cases that verify external APIs are accessible
+ * from outside the org.apache.lucene.index package.
+ */
+public class TestMergeSchedulerExternal extends LuceneTestCase {
+
+  volatile boolean mergeCalled;
+  volatile boolean mergeThreadCreated;
+  volatile boolean excCalled;
+
+  private class MyMergeScheduler extends ConcurrentMergeScheduler {
+
+    private class MyMergeThread extends ConcurrentMergeScheduler.MergeThread {
+      public MyMergeThread(IndexWriter writer, MergePolicy.OneMerge merge) throws IOException {
+        super(writer, merge);
+        mergeThreadCreated = true;
+      }
+    }
+
+    @Override
+    protected MergeThread getMergeThread(IndexWriter writer, MergePolicy.OneMerge merge) throws IOException {
+      MergeThread thread = new MyMergeThread(writer, merge);
+      thread.setThreadPriority(getMergeThreadPriority());
+      thread.setDaemon(true);
+      thread.setName("MyMergeThread");
+      return thread;
+    }
+
+    @Override
+    protected void handleMergeException(Throwable t) {
+      excCalled = true;
+    }
+
+    @Override
+    protected void doMerge(MergePolicy.OneMerge merge) throws IOException {
+      mergeCalled = true;
+      super.doMerge(merge);
+    }
+  }
+
+  private static class FailOnlyOnMerge extends MockDirectoryWrapper.Failure {
+    @Override
+    public void eval(MockDirectoryWrapper dir)  throws IOException {
+      StackTraceElement[] trace = new Exception().getStackTrace();
+      for (int i = 0; i < trace.length; i++) {
+        if ("doMerge".equals(trace[i].getMethodName()))
+          throw new IOException("now failing during merge");
+      }
+    }
+  }
+
+  public void testSubclassConcurrentMergeScheduler() throws IOException {
+    MockDirectoryWrapper dir = newDirectory();
+    dir.failOn(new FailOnlyOnMerge());
+
+    Document doc = new Document();
+    Field idField = newField("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
+    doc.add(idField);
+    
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
+        TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergeScheduler(new MyMergeScheduler())
+        .setMaxBufferedDocs(2).setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH)
+        .setMergePolicy(newLogMergePolicy()));
+    LogMergePolicy logMP = (LogMergePolicy) writer.getConfig().getMergePolicy();
+    logMP.setMergeFactor(10);
+    for(int i=0;i<20;i++)
+      writer.addDocument(doc);
+
+    ((MyMergeScheduler) writer.getConfig().getMergeScheduler()).sync();
+    writer.close();
+    
+    assertTrue(mergeThreadCreated);
+    assertTrue(mergeCalled);
+    assertTrue(excCalled);
+    dir.close();
+  }
+  
+  private static class ReportingMergeScheduler extends MergeScheduler {
+
+    @Override
+    public void merge(IndexWriter writer) throws CorruptIndexException, IOException {
+      OneMerge merge = null;
+      while ((merge = writer.getNextMerge()) != null) {
+        if (VERBOSE) {
+          System.out.println("executing merge " + merge.segString(writer.getDirectory()));
+        }
+        writer.merge(merge);
+      }
+    }
+
+    @Override
+    public void close() throws CorruptIndexException, IOException {}
+    
+  }
+
+  public void testCustomMergeScheduler() throws Exception {
+    // we don't really need to execute anything, just make sure the custom MS
+    // compiles. But also ensure it can actually be used, e.g., there are no hidden
+    // dependencies. Therefore, don't use any random API!
+    Directory dir = new RAMDirectory();
+    IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, null);
+    conf.setMergeScheduler(new ReportingMergeScheduler());
+    IndexWriter writer = new IndexWriter(dir, conf);
+    writer.addDocument(new Document());
+    writer.commit(); // trigger flush
+    writer.addDocument(new Document());
+    writer.commit(); // trigger flush
+    writer.optimize();
+    writer.close();
+    dir.close();
+  }
+  
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/TestSearch.java b/lucene/backwards/src/test/org/apache/lucene/TestSearch.java
new file mode 100644
index 0000000..7697878
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/TestSearch.java
@@ -0,0 +1,144 @@
+package org.apache.lucene;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.GregorianCalendar;
+import java.util.Random;
+import java.io.PrintWriter;
+import java.io.StringWriter;
+
+import org.apache.lucene.util.LuceneTestCase;
+import junit.framework.TestSuite;
+import junit.textui.TestRunner;
+
+import org.apache.lucene.store.*;
+import org.apache.lucene.document.*;
+import org.apache.lucene.analysis.*;
+import org.apache.lucene.index.*;
+import org.apache.lucene.search.*;
+import org.apache.lucene.queryParser.*;
+
+/** JUnit adaptation of an older test case SearchTest. */
+public class TestSearch extends LuceneTestCase {
+
+    /** Main for running test case by itself. */
+    public static void main(String args[]) {
+        TestRunner.run (new TestSuite(TestSearch.class));
+    }
+
+    /** This test performs a number of searches. It also compares output
+     *  of searches using multi-file index segments with single-file
+     *  index segments.
+     *
+     *  TODO: someone should check that the results of the searches are
+     *        still correct by adding assert statements. Right now, the test
+     *        passes if the results are the same between multi-file and
+     *        single-file formats, even if the results are wrong.
+     */
+    public void testSearch() throws Exception {
+      StringWriter sw = new StringWriter();
+      PrintWriter pw = new PrintWriter(sw, true);
+      doTestSearch(random, pw, false);
+      pw.close();
+      sw.close();
+      String multiFileOutput = sw.getBuffer().toString();
+      //System.out.println(multiFileOutput);
+
+      sw = new StringWriter();
+      pw = new PrintWriter(sw, true);
+      doTestSearch(random, pw, true);
+      pw.close();
+      sw.close();
+      String singleFileOutput = sw.getBuffer().toString();
+
+      assertEquals(multiFileOutput, singleFileOutput);
+    }
+
+
+    private void doTestSearch(Random random, PrintWriter out, boolean useCompoundFile)
+    throws Exception {
+      Directory directory = newDirectory();
+      Analyzer analyzer = new MockAnalyzer(random);
+      IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer);
+      MergePolicy mp = conf.getMergePolicy();
+      if (mp instanceof LogMergePolicy) {
+        ((LogMergePolicy) mp).setUseCompoundFile(useCompoundFile);
+      }
+      
+      IndexWriter writer = new IndexWriter(directory, conf);
+
+      String[] docs = {
+        "a b c d e",
+        "a b c d e a b c d e",
+        "a b c d e f g h i j",
+        "a c e",
+        "e c a",
+        "a c e a c e",
+        "a c e a b c"
+      };
+      for (int j = 0; j < docs.length; j++) {
+        Document d = new Document();
+        d.add(newField("contents", docs[j], Field.Store.YES, Field.Index.ANALYZED));
+        d.add(newField("id", ""+j, Field.Index.NOT_ANALYZED_NO_NORMS));
+        writer.addDocument(d);
+      }
+      writer.close();
+
+      Searcher searcher = new IndexSearcher(directory, true);
+
+      String[] queries = {
+        "a b",
+        "\"a b\"",
+        "\"a b c\"",
+        "a c",
+        "\"a c\"",
+        "\"a c e\"",
+      };
+      ScoreDoc[] hits = null;
+
+      Sort sort = new Sort(new SortField[] {
+          SortField.FIELD_SCORE,
+          new SortField("id", SortField.INT)});
+
+      QueryParser parser = new QueryParser(TEST_VERSION_CURRENT, "contents", analyzer);
+      parser.setPhraseSlop(4);
+      for (int j = 0; j < queries.length; j++) {
+        Query query = parser.parse(queries[j]);
+        out.println("Query: " + query.toString("contents"));
+
+        hits = searcher.search(query, null, 1000, sort).scoreDocs;
+
+        out.println(hits.length + " total results");
+        for (int i = 0 ; i < hits.length && i < 10; i++) {
+          Document d = searcher.doc(hits[i].doc);
+          out.println(i + " " + hits[i].score
+// 			   + " " + DateField.stringToDate(d.get("modified"))
+                             + " " + d.get("contents"));
+        }
+      }
+      searcher.close();
+      directory.close();
+  }
+
+  static long Time(int year, int month, int day) {
+    GregorianCalendar calendar = new GregorianCalendar();
+    calendar.clear();
+    calendar.set(year, month, day);
+    return calendar.getTime().getTime();
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/TestSearchForDuplicates.java b/lucene/backwards/src/test/org/apache/lucene/TestSearchForDuplicates.java
new file mode 100644
index 0000000..32a74ce
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/TestSearchForDuplicates.java
@@ -0,0 +1,158 @@
+package org.apache.lucene;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.io.StringWriter;
+import java.util.Random;
+
+import org.apache.lucene.store.*;
+import org.apache.lucene.document.*;
+import org.apache.lucene.analysis.*;
+import org.apache.lucene.index.*;
+import org.apache.lucene.search.*;
+import org.apache.lucene.queryParser.*;
+import org.apache.lucene.util.LuceneTestCase;
+import junit.framework.TestSuite;
+import junit.textui.TestRunner;
+
+public class TestSearchForDuplicates extends LuceneTestCase {
+
+    /** Main for running test case by itself. */
+    public static void main(String args[]) {
+        TestRunner.run (new TestSuite(TestSearchForDuplicates.class));
+    }
+
+
+
+  static final String PRIORITY_FIELD ="priority";
+  static final String ID_FIELD ="id";
+  static final String HIGH_PRIORITY ="high";
+  static final String MED_PRIORITY ="medium";
+  static final String LOW_PRIORITY ="low";
+
+
+  /** This test compares search results when using and not using compound
+   *  files.
+   *
+   *  TODO: There is rudimentary search result validation as well, but it is
+   *        simply based on asserting the output observed in the old test case,
+   *        without really knowing if the output is correct. Someone needs to
+   *        validate this output and make any changes to the checkHits method.
+   */
+  public void testRun() throws Exception {
+      StringWriter sw = new StringWriter();
+      PrintWriter pw = new PrintWriter(sw, true);
+      final int MAX_DOCS = atLeast(225);
+      doTest(random, pw, false, MAX_DOCS);
+      pw.close();
+      sw.close();
+      String multiFileOutput = sw.getBuffer().toString();
+      //System.out.println(multiFileOutput);
+
+      sw = new StringWriter();
+      pw = new PrintWriter(sw, true);
+      doTest(random, pw, true, MAX_DOCS);
+      pw.close();
+      sw.close();
+      String singleFileOutput = sw.getBuffer().toString();
+
+      assertEquals(multiFileOutput, singleFileOutput);
+  }
+
+
+  private void doTest(Random random, PrintWriter out, boolean useCompoundFiles, int MAX_DOCS) throws Exception {
+      Directory directory = newDirectory();
+      Analyzer analyzer = new MockAnalyzer(random);
+      IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer);
+      final MergePolicy mp = conf.getMergePolicy();
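+      // Only difference between the two doTest() passes: compound (.cfs) vs. multi-file segments.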
+      if (mp instanceof LogMergePolicy) {
+        ((LogMergePolicy) mp).setUseCompoundFile(useCompoundFiles);
+      }
+      IndexWriter writer = new IndexWriter(directory, conf);
+      if (VERBOSE) {
+        System.out.println("TEST: now build index");
+        writer.setInfoStream(System.out);
+      }
+
+      for (int j = 0; j < MAX_DOCS; j++) {
+        Document d = new Document();
+        d.add(newField(PRIORITY_FIELD, HIGH_PRIORITY, Field.Store.YES, Field.Index.ANALYZED));
+        d.add(newField(ID_FIELD, Integer.toString(j), Field.Store.YES, Field.Index.ANALYZED));
+        writer.addDocument(d);
+      }
+      writer.close();
+
+      // try a search without OR
+      Searcher searcher = new IndexSearcher(directory, true);
+
+      QueryParser parser = new QueryParser(TEST_VERSION_CURRENT, PRIORITY_FIELD, analyzer);
+
+      Query query = parser.parse(HIGH_PRIORITY);
+      out.println("Query: " + query.toString(PRIORITY_FIELD));
+
+      final Sort sort = new Sort(new SortField[] {
+          SortField.FIELD_SCORE,
+          new SortField(ID_FIELD, SortField.INT)});
+
+      ScoreDoc[] hits = searcher.search(query, null, MAX_DOCS, sort).scoreDocs;
+      printHits(out, hits, searcher);
+      checkHits(hits, MAX_DOCS, searcher);
+
+      searcher.close();
+
+      // try a new search with OR
+      searcher = new IndexSearcher(directory, true);
+      hits = null;
+
+      parser = new QueryParser(TEST_VERSION_CURRENT, PRIORITY_FIELD, analyzer);
+
+      query = parser.parse(HIGH_PRIORITY + " OR " + MED_PRIORITY);
+      out.println("Query: " + query.toString(PRIORITY_FIELD));
+
+      hits = searcher.search(query, null, MAX_DOCS, sort).scoreDocs;
+      printHits(out, hits, searcher);
+      checkHits(hits, MAX_DOCS, searcher);
+
+      searcher.close();
+      directory.close();
+  }
+
+
+  private void printHits(PrintWriter out, ScoreDoc[] hits, Searcher searcher) throws IOException {
+    out.println(hits.length + " total results\n");
+    for (int i = 0; i < hits.length; i++) {
+      if (i < 10 || (i > 94 && i < 105)) {
+        Document d = searcher.doc(hits[i].doc);
+        out.println(i + " " + d.get(ID_FIELD));
+      }
+    }
+  }
+
+  private void checkHits(ScoreDoc[] hits, int expectedCount, Searcher searcher) throws IOException {
+    assertEquals("total results", expectedCount, hits.length);
+    for (int i = 0; i < hits.length; i++) {
+      if (i < 10 || (i > 94 && i < 105)) {
+        Document d = searcher.doc(hits[i].doc);
+        assertEquals("check " + i, String.valueOf(i), d.get(ID_FIELD));
+      }
+    }
+  }
+
+}
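The Javadoc on testRun() above spells out the intent: the two doTest() passes differ only in whether segments are written in the compound file format, toggled through the merge policy. For readers new to the 3.x API, a minimal standalone sketch of that configuration step (the class name, analyzer, and RAMDirectory are illustrative only and not code from this patch):

  import org.apache.lucene.analysis.standard.StandardAnalyzer;
  import org.apache.lucene.document.Document;
  import org.apache.lucene.document.Field;
  import org.apache.lucene.index.IndexWriter;
  import org.apache.lucene.index.IndexWriterConfig;
  import org.apache.lucene.index.LogByteSizeMergePolicy;
  import org.apache.lucene.store.RAMDirectory;
  import org.apache.lucene.util.Version;

  // Hypothetical demo, not part of this commit.
  public class CompoundFileConfigDemo {
    public static void main(String[] args) throws Exception {
      RAMDirectory dir = new RAMDirectory();
      IndexWriterConfig conf = new IndexWriterConfig(
          Version.LUCENE_34, new StandardAnalyzer(Version.LUCENE_34));
      // The setting the test varies: true writes .cfs segments, false writes one file per segment part.
      LogByteSizeMergePolicy mp = new LogByteSizeMergePolicy();
      mp.setUseCompoundFile(true);
      conf.setMergePolicy(mp);
      IndexWriter writer = new IndexWriter(dir, conf);
      Document d = new Document();
      d.add(new Field("priority", "high", Field.Store.YES, Field.Index.ANALYZED));
      writer.addDocument(d);
      writer.close();
      dir.close();
    }
  }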
diff --git a/lucene/backwards/src/test/org/apache/lucene/analysis/LuceneResourcesWikiPage.html b/lucene/backwards/src/test/org/apache/lucene/analysis/LuceneResourcesWikiPage.html
new file mode 100644
index 0000000..cc23b3d
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/analysis/LuceneResourcesWikiPage.html
@@ -0,0 +1,267 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
+<html>
+<head>
+<meta http-equiv="Content-Type" content="text/html;charset=utf-8">
+<meta name="robots" content="index,nofollow">
+
+<title>Resources - Lucene-java Wiki</title>
+<script type="text/javascript" src="/moin_static184/common/js/common.js"></script>
+
+<script type="text/javascript">
+<!--
+var search_hint = "Search";
+//-->
+</script>
+
+
+<link rel="stylesheet" type="text/css" charset="utf-8" media="all" href="/moin_static184/modernized/css/common.css">
+<link rel="stylesheet" type="text/css" charset="utf-8" media="screen" href="/moin_static184/modernized/css/screen.css">
+<link rel="stylesheet" type="text/css" charset="utf-8" media="print" href="/moin_static184/modernized/css/print.css">
+<link rel="stylesheet" type="text/css" charset="utf-8" media="projection" href="/moin_static184/modernized/css/projection.css">
+
+<!-- css only for MS IE6/IE7 browsers -->
+<!--[if lt IE 8]>
+   <link rel="stylesheet" type="text/css" charset="utf-8" media="all" href="/moin_static184/modernized/css/msie.css">
+<![endif]-->
+
+
+
+
+
+<link rel="Start" href="/lucene-java/FrontPageEN">
+<link rel="Alternate" title="Wiki Markup" href="/lucene-java/Resources?action=raw">
+<link rel="Alternate" media="print" title="Print View" href="/lucene-java/Resources?action=print">
+<link rel="Appendix" title="IntroductionToApacheLucene.jp.jpg" href="/lucene-java/Resources?action=AttachFile&amp;do=view&amp;target=IntroductionToApacheLucene.jp.jpg">
+<link rel="Appendix" title="SuchmaschinenEntwickelnMitApacheLucene.de.jpg" href="/lucene-java/Resources?action=AttachFile&amp;do=view&amp;target=SuchmaschinenEntwickelnMitApacheLucene.de.jpg">
+<link rel="Appendix" title="building.search.applications.png" href="/lucene-java/Resources?action=AttachFile&amp;do=view&amp;target=building.search.applications.png">
+<link rel="Appendix" title="lia3d.jpg" href="/lucene-java/Resources?action=AttachFile&amp;do=view&amp;target=lia3d.jpg">
+<link rel="Search" href="/lucene-java/FindPage">
+<link rel="Index" href="/lucene-java/TitleIndex">
+<link rel="Glossary" href="/lucene-java/WordIndex">
+<link rel="Help" href="/lucene-java/HelpOnFormatting">
+</head>
+
+<body  lang="en" dir="ltr">
+
+<div id="header">
+
+<form id="searchform" method="get" action="/lucene-java/Resources">
+<div>
+<input type="hidden" name="action" value="fullsearch">
+<input type="hidden" name="context" value="180">
+<label for="searchinput">Search:</label>
+<input id="searchinput" type="text" name="value" value="" size="20"
+    onfocus="searchFocus(this)" onblur="searchBlur(this)"
+    onkeyup="searchChange(this)" onchange="searchChange(this)" alt="Search">
+<input id="titlesearch" name="titlesearch" type="submit"
+    value="Titles" alt="Search Titles">
+<input id="fullsearch" name="fullsearch" type="submit"
+    value="Text" alt="Search Full Text">
+</div>
+</form>
+<script type="text/javascript">
+<!--// Initialize search form
+var f = document.getElementById('searchform');
+f.getElementsByTagName('label')[0].style.display = 'none';
+var e = document.getElementById('searchinput');
+searchChange(e);
+searchBlur(e);
+//-->
+</script>
+
+<div id="logo"><a href="/lucene-java/FrontPageEN">Lucene-java Wiki</a></div>
+<div id="username"><a href="/lucene-java/Resources?action=login" id="login" rel="nofollow">Login</a></div>
+<h1 id="locationline">
+
+<span id="pagelocation"><a class="backlink" href="/lucene-java/Resources?action=fullsearch&amp;context=180&amp;value=linkto%3A%22Resources%22" rel="nofollow" title="Click to do a full-text search for this title">Resources</a></span>
+</h1>
+
+
+<ul id="navibar">
+<li class="wikilink"><a href="/lucene-java/FrontPageEN">FrontPageEN</a></li><li class="wikilink"><a href="/lucene-java/RecentChanges">RecentChanges</a></li><li class="wikilink"><a href="/lucene-java/FindPage">FindPage</a></li><li class="wikilink"><a href="/lucene-java/HelpContents">HelpContents</a></li><li class="current"><a href="/lucene-java/Resources">Resources</a></li>
+</ul>
+
+<div id="pageline"><hr style="display:none;"></div>
+
+<ul class="editbar"><li><span class="disabled">Immutable Page</span></li><li class="toggleCommentsButton" style="display:none;"><a href="#" class="nbcomment" onClick="toggleComments();return false;">Comments</a></li><li><a class="nbinfo" href="/lucene-java/Resources?action=info" rel="nofollow">Info</a></li><li>
+<form class="actionsmenu" method="GET" action="/lucene-java/Resources">
+<div>
+    <label>More Actions:</label>
+    <select name="action"
+        onchange="if ((this.selectedIndex != 0) &&
+                      (this.options[this.selectedIndex].disabled == false)) {
+                this.form.submit();
+            }
+            this.selectedIndex = 0;">
+        <option value="raw">Raw Text</option>
+<option value="print">Print View</option>
+<option value="RenderAsDocbook">Render as Docbook</option>
+<option value="refresh">Delete Cache</option>
+<option value="show" disabled class="disabled">------------------------</option>
+<option value="SpellCheck">Check Spelling</option>
+<option value="LikePages">Like Pages</option>
+<option value="LocalSiteMap">Local Site Map</option>
+<option value="show" disabled class="disabled">------------------------</option>
+<option value="RenamePage" disabled class="disabled">Rename Page</option>
+<option value="CopyPage">Copy Page</option>
+<option value="DeletePage" disabled class="disabled">Delete Page</option>
+<option value="show" disabled class="disabled">------------------------</option>
+<option value="MyPages">My Pages</option>
+<option value="show" disabled class="disabled">Subscribe User</option>
+<option value="show" disabled class="disabled">------------------------</option>
+<option value="show" disabled class="disabled">Remove Spam</option>
+<option value="show" disabled class="disabled">Revert to this revision</option>
+<option value="show" disabled class="disabled">Package Pages</option>
+<option value="SyncPages">Sync Pages</option>
+<option value="show" disabled class="disabled">------------------------</option>
+<option value="Load">Load</option>
+<option value="Save">Save</option>
+    </select>
+    <input type="submit" value="Do">
+    
+</div>
+<script type="text/javascript">
+<!--// Init menu
+actionsMenuInit('More Actions:');
+//-->
+</script>
+</form>
+</li></ul>
+
+</div>
+
+<div id="page" lang="en" dir="ltr">
+<div dir="ltr" id="content" lang="en"><span class="anchor" id="top"></span>
+<span class="anchor" id="line-2"></span><p class="line867"><div class="table-of-contents"><p class="table-of-contents-heading">Contents<ol><li>
+<a href="#Introductions">Introductions</a></li><li>
+<a href="#Blogs">Blogs</a></li><li>
+<a href="#Books">Books</a></li><li>
+<a href="#Articles">Articles</a></li><li>
+<a href="#Interviews">Interviews</a></li><li>
+<a href="#Papers">Papers</a></li><li>
+<a href="#Presentations">Presentations</a></li><li>
+<a href="#Training">Training</a></li><li>
+<a href="#Corpora">Corpora</a></li><li>
+<a href="#Other">Other</a></li></ol></div> <span class="anchor" id="line-3"></span><span class="anchor" id="line-4"></span><p class="line867">
+<h1 id="Introductions">Introductions</h1>
+<span class="anchor" id="line-5"></span><span class="anchor" id="line-6"></span><ul><li><p class="line862">The API documentation contains  <a class="http" href="http://lucene.apache.org/java/3_0_1/api/all/overview-summary.html#overview_description">a short and simple code example</a> that shows the basic way to index and search <span class="anchor" id="line-7"></span></li><li><p class="line862">The <a class="http" href="http://lucene.apache.org/java/3_0_1/gettingstarted.html">Getting Started Guide</a> that describes the demos that come with Lucene <span class="anchor" id="line-8"></span><span class="anchor" id="line-9"></span><span class="anchor" id="line-10"></span></li></ul><p class="line867">
+<h1 id="Blogs">Blogs</h1>
+<span class="anchor" id="line-11"></span><span class="anchor" id="line-12"></span><ul><li><p class="line891"><a class="http" href="http://lucene.grantingersoll.com">Grant's Grunts: Lucene edition</a> - Grant Ingersoll's thoughts on the Lucene ecosystem. <span class="anchor" id="line-13"></span></li><li><p class="line891"><a class="http" href="http://www.lucidimagination.com/blog/">Lucid Imagination's Blog</a> - Many of the Lucene and Solr committers blog here about how to use Lucene and Solr <span class="anchor" id="line-14"></span></li><li><p class="line891"><a class="http" href="http://blog.sematext.com/">Sematext Blog</a> - Search and Analytics covering Lucene, Solr, Nutch, Hadoop, HBase, and more <span class="anchor" id="line-15"></span><span class="anchor" id="line-16"></span><span class="anchor" id="line-17"></span></li></ul><p class="line867">
+<h1 id="Books">Books</h1>
+<span class="anchor" id="line-18"></span><span class="anchor" id="line-19"></span><ul><li><p class="line891"><img alt="http://www.manning.com/hatcher3/hatcher3_cover150.jpg" class="external_image" src="http://www.manning.com/hatcher3/hatcher3_cover150.jpg" title="http://www.manning.com/hatcher3/hatcher3_cover150.jpg" /> "<a class="http" href="http://www.manning.com/hatcher3/">Lucene in Action, Second Edition"</a> by Erik Hatcher, Otis Gospodneti&#263;, and Michael McCandless <span class="anchor" id="line-20"></span></li><li><p class="line891"><img alt="building.search.applications.png" class="attachment" src="/lucene-java/Resources?action=AttachFile&amp;do=get&amp;target=building.search.applications.png" title="building.search.applications.png" /> "<a class="http" href="http://www.amazon.com/Building-Search-Applications-Lucene-Lingpipe/dp/0615204252/">Building Search Applications: Lucene, LingPipe, and Gate</a>" by Manu Konchady; Mustru Publishing; June 2008; ISBN 978-0615204253 <span class="anchor" id="line-21"></span></li><li><p class="line891"><img alt="IntroductionToApacheLucene.jp.jpg" class="attachment" src="/lucene-java/Resources?action=AttachFile&amp;do=get&amp;target=IntroductionToApacheLucene.jp.jpg" title="IntroductionToApacheLucene.jp.jpg" /> "<a class="http" href="http://www.amazon.co.jp/exec/obidos/ASIN/4774127809/503-9461699-1775907">Apache Lucene 入門 ~Java・オープンソース・全文検索システムの構築</a>" 関口 宏司 ; 技術評論社 ; 2006/05/17 ; ISBN: 4774127809 (<span class="u">Introduction to Apache Lucene: Construction of Java Open Source Full Text Retrieval Systems</span> by Koshi Sekiguti ; Gijutsu-Hyohron Co., Ltd.) <span class="anchor" id="line-22"></span></li><li><p class="line891"><img alt="lia3d.jpg" class="attachment" src="/lucene-java/Resources?action=AttachFile&amp;do=get&amp;target=lia3d.jpg" title="lia3d.jpg" /> "<a class="http" href="http://www.lucenebook.com">Lucene In Action</a>" by Erik Hatcher, Otis Gospodneti&#263;; Manning Publications; December 2004; ISBN 1932394281 (also available from <a class="http" href="http://www.amazon.com/exec/obidos/ASIN/1932394281">Amazon.com</a>) <span class="anchor" id="line-23"></span></li><li><p class="line891"><img alt="SuchmaschinenEntwickelnMitApacheLucene.de.jpg" class="attachment" src="/lucene-java/Resources?action=AttachFile&amp;do=get&amp;target=SuchmaschinenEntwickelnMitApacheLucene.de.jpg" title="SuchmaschinenEntwickelnMitApacheLucene.de.jpg" /> Manfred Hardt, Dr. Fabian Theis: "<a class="http" href="http://www.amazon.de/Suchmaschinen-entwickeln-mit-Apache-Lucene/dp/3935042450">Suchmaschinen entwickeln mit Apache Lucene</a>"; Software &amp; Support Verlag, Frankfurt/Main, Germany; September 2004; ISBN 3935042450 (<span class="u">Developing Search Engines with Apache Lucene</span>) <span class="anchor" id="line-24"></span><span class="anchor" id="line-25"></span></li></ul><p class="line867">
+<h1 id="Articles">Articles</h1>
+<span class="anchor" id="line-26"></span><span class="anchor" id="line-27"></span><ul><li><p class="line891"><a class="http" href="http://www.lucidimagination.com/Community/Hear-from-the-Experts/Articles/Getting-Started-with-Lucene/">Getting Started with Lucene</a> (by Grant Ingersoll) <br>
+ (<em>Published: January 2009 - article</em>) <span class="anchor" id="line-28"></span></li><li><p class="line891"><a class="http" href="http://www.lucidimagination.com/Community/Hear-from-the-Experts/Articles/Optimizing-Findability-in-Lucene-and-Solr/">Optimizing Findability in Lucene and Solr</a> (by  Grant Ingersoll)<br>
+ (<em>Published: January 2009 - article</em>) <span class="anchor" id="line-29"></span></li><li><p class="line891"><a class="http" href="http://www.lucidimagination.com/Community/Hear-from-the-Experts/Articles/Debugging-Relevance-Issues-in-Search/">Debugging Relevance Issues in Search</a> (by Grant Ingersoll)<br>
+ (<em>Published: January 2009 - article</em>) <span class="anchor" id="line-30"></span></li><li><p class="line891"><a class="http" href="http://www.lucidimagination.com/Community/Hear-from-the-Experts/Articles/Scaling-Lucene-and-Solr/">Scaling Lucene and Solr</a> (by Mark Miller)<br>
+ (<em>Published: January 2009 - article</em>)  <span class="anchor" id="line-31"></span></li><li><p class="line891"><a class="http" href="http://www.lucidimagination.com/Community/Hear-from-the-Experts/Articles/Introduction-to-Apache-Lucene-and-Solr/">Introduction to Apache Lucene and Solr</a> (by Marc Krellenstein)<br>
+(<em>Published: January 2009 - article</em>)  <span class="anchor" id="line-32"></span></li><li><p class="line891"><a class="http" href="http://cephas.net/blog/2008/03/30/how-morelikethis-works-in-lucene/">How MoreLikeThis Works in Lucene</a> (by Aaron Johnson)<br>
+(<em>Last updated: March 2008 - blog entry</em>) <span class="anchor" id="line-33"></span></li><li><p class="line891"><a class="http" href="http://schmidt.devlib.org/software/lucene-wikipedia.html">Lucene Wikipedia indexer</a> (by Marco Schmidt)<br>
+(<em>Last updated: November 2007 - tutorial</em>) <span class="anchor" id="line-34"></span></li><li><p class="line891"><a class="http" href="http://marceloochoa.blogspot.com/2007/09/running-lucene-inside-your-oracle-jvm.html">Running Lucene inside your Oracle JVM</a> (by Marcelo Ochoa)<br>
+(<em>Last updated: September 2007 - blog entry</em>) <span class="anchor" id="line-35"></span></li><li><p class="line891"><a class="http" href="http://www.onjava.com/pub/a/onjava/2007/05/24/using-the-lucene-query-parser-without-lucene.html">Using the Lucene Query Parser Without Lucene</a> (by Marcin Maciukiewicz and Daniel Owsiański)<br>
+(<em>Published: May 2007 - article</em>) <span class="anchor" id="line-36"></span></li><li><p class="line891"><a class="http" href="http://www.javaworld.com/javaworld/jw-09-2006/jw-0925-lucene.html">Integrate advanced search functionalities into your apps</a> (by John Ferguson Smart)<br>
+(<em>Published: September 2006 - article</em>) <span class="anchor" id="line-37"></span></li><li><p class="line891"><a class="http" href="http://www-128.ibm.com/developerworks/java/library/wa-lucene2/index.html?ca=drs-">Beef up Web search applications with Lucene</a> (by Deng Peng Zhou)<br>
+(<em>Published: August 2006 - article</em>) <span class="anchor" id="line-38"></span></li><li><p class="line891"><a class="http" href="http://www.freesearch.pe.kr/tag/Lucene">Lecture &amp; Etc : Lucene index file format for Korean</a> (by Jeon Hee-Won)<br>
+(<em>Published: July 2006 - article</em>) <span class="anchor" id="line-39"></span></li><li>Cai Ziegler: "Suche nach Suche -- Apaches Lucene: eigene Suche und Indizierung"; iX 6/2006, Seite 120; Heise Zeitschriften Verlag, Hannover, Germany <span class="anchor" id="line-40"></span></li><li><p class="line891"><a class="http" href="http://www-128.ibm.com/developerworks/java/library/wa-lucene/index.html">Delve inside the Lucene indexing mechanism</a> (by Deng Peng Zhou)<br>
+(<em>Published: June 2006 - article</em>) <span class="anchor" id="line-41"></span></li><li><p class="line891"><a class="http" href="http://www.onjava.com/pub/a/onjava/2006/01/18/using-lucene-to-search-java-source.html">Using Lucene to Search Java Source Code</a> (by Renuka Sindhgatta)<br>
+(<em>Published: January 2006 - article</em>) <span class="anchor" id="line-42"></span></li><li><p class="line891"><a class="http" href="http://www.jroller.com/page/wakaleo/?anchor=lucene_a_tutorial_introduction_to">Lucene : a tutorial introduction to full-text indexing in Java</a> (by John Ferguson Smart)<br>
+(<em>Published: October 2005 - article</em>) <span class="anchor" id="line-43"></span></li><li>Daniel Naber: "Herr der Suche -- Eigene Anwendungen mit Volltextsuche erweitern"; c't 7/2005, Seite 196; Heise Zeitschriften Verlag, Hannover, Germany <span class="anchor" id="line-44"></span></li><li><p class="line891"><a class="http" href="http://blog.dev.sf.net/index.php?/archives/10-Behind-the-Scenes-of-the-SourceForge.net-Search-System.html">Behind the Scenes of the SourceForge.net Search System</a> (by Chris Conrad)<br>
+(<em>Last updated: June 2005 - blog entry</em>) <span class="anchor" id="line-45"></span></li><li><p class="line891"><a class="http" href="http://today.java.net/pub/a/today/2005/08/09/didyoumean.html">Did You Mean: Lucene?</a> (by Tom White)<br>
+(<em>Published: August 2005 - article</em>) <span class="anchor" id="line-46"></span></li><li><p class="line891"><a class="http" href="http://www.developer.com/java/other/article.php/3490471">Meet Lucene</a> (by Otis Gospodneti&#263;, Eric Hatcher)<br>
+(<em>Published: March 2005 - article</em>) <span class="anchor" id="line-47"></span></li><li><p class="line891"><a class="http" href="http://www.theserverside.com/tt/articles/article.tss?l=ILoveLucene">I Love Lucene</a> (by Dion Almaer)<br>
+(<em>Published: January 2005 - article</em>) <span class="anchor" id="line-48"></span></li><li><p class="line891"><a class="http" href="http://javaboutique.internet.com/tutorials/HTMLParser/article.html">Unweaving a Tangled Web With HTMLParser and Lucene</a> (by Keld H. Hansen)<br>
+(<em>Last updated: October 2004 - tutorial</em>) <span class="anchor" id="line-49"></span></li><li><p class="line891"><a class="http" href="http://bilgidata.com/localhost/bilgidata/yazi.jsp@dosya=a_lucene.xml.html">Lucene Introduction in Turkish</a> Java Bazl&#305; Arama Motoru - Lusin (by Burak Bayraml&#305;)<br>
+(<em>Last updated: August 2004 - tutorial</em>) <span class="anchor" id="line-50"></span></li><li><p class="line891"><a class="http" href="http://www.chedong.com/tech/lucene.html">Lucene Introduction in Chinese</a> Lucene&#65306;&#22522;&#20110;Java&#30340;&#20840;&#25991;&#26816;&#32034;&#24341;&#25806;&#31616;&#20171; (by Che Dong; &#20316;&#32773;&#65306; &#36710;&#19996;)<br>
+(<em>Last updated: May 2004 - tutorial</em>) <span class="anchor" id="line-51"></span></li><li><p class="line891"><a class="http" href="http://javatechniques.com/public/java/docs/basics/lucene-memory-search.html">Lucene In-Memory Text Search</a> (by Philip Isenhour)<br>
+(<em>Last updated: May 2004 - tutorial</em>) <span class="anchor" id="line-52"></span></li><li><p class="line891"><a class="http" href="http://www.javaranch.com/newsletter/200404/Lucene.html">The Lucene Search Engine: Adding Search to Your Applications</a> (by Thomas Paul)<br>
+(<em>Published: April 2004 - article</em>) <span class="anchor" id="line-53"></span></li><li><p class="line891"><a class="http" href="http://www.darksleep.com/lucene/">Lucene Tutorial</a> (by Steven J. Owens)<br>
+(<em>Last updated: March 2004 - tutorial</em>) <span class="anchor" id="line-54"></span></li><li><p class="line891"><a class="http" href="http://www-igm.univ-mlv.fr/~dr/XPOSE2003/lucene/articleLucene.html">Lucene Introduction in French</a> Exposés Système sur le thème de l'opensource : Analyse de la structure de Lucene. (by Sun Seng TAN)<br>
+(<em>Last updated: February 2004 - tutorial</em>) <span class="anchor" id="line-55"></span></li><li><p class="line891"><a class="http" href="http://today.java.net/pub/a/today/2003/11/07/QueryParserRules.html">QueryParser Rules</a> (by Erik Hatcher)<br>
+(<em>Published November 2003 - article</em>) <span class="anchor" id="line-56"></span></li><li><p class="line891"><a class="http" href="http://builder.com.com/5100-6389-5054799.html">Give your Web site its own search engine using Lucene</a> (by Jeffrey Linwood)<br>
+(<em>Published July 2003 - article</em>) <span class="anchor" id="line-57"></span></li><li><p class="line891"><a class="http" href="http://today.java.net/pub/a/today/2003/07/30/LuceneIntro.html">Lucene Intro</a> (by Erik Hatcher)<br>
+(<em>Published: July 2003 - article</em>) <span class="anchor" id="line-58"></span></li><li><p class="line891"><a class="http" href="http://www-106.ibm.com/developerworks/library/j-lucene/">Parsing, indexing, and searching XML with Digester and Lucene</a> (by Otis Gospodneti&#263;)<br>
+(<em>Published June 2003 - article</em>) <span class="anchor" id="line-59"></span></li><li><p class="line891"><a class="http" href="http://www.xml.com/pub/a/ws/2003/05/13/email.html">Using Python, Jython, and Lucene to Search Outlook Email</a> (by Jon Udell)<br>
+(<em>Published: May 2003 - article</em>) <span class="anchor" id="line-60"></span></li><li><p class="line891"><a class="http" href="http://www.onjava.com/pub/a/onjava/2003/03/05/lucene.html">Advanced Text Indexing with Lucene</a> (by Otis Gospodneti&#263;)<br>
+(<em>Published: March 2003 - article</em>) <span class="anchor" id="line-61"></span></li><li><p class="line891"><a class="http" href="http://www.onjava.com/pub/a/onjava/2003/01/15/lucene.html">Introduction to Text Indexing with Apache Jakarta Lucene</a> (by Otis Gospodneti&#263;)<br>
+(<em>Published: January 2003 - article</em>) <span class="anchor" id="line-62"></span></li><li><p class="line862">Manfred Hardt: "Suchmaschinen entwickeln mit Java und Lucene - Wo war denn noch gleich ... ?"; JavaMagazin 9/2002; Software &amp; Support Verlag, Frankfurt/Main, Germany <span class="anchor" id="line-63"></span></li><li><p class="line891"><a class="http" href="http://javangelist.snipsnap.org/space/Lucene-Mini-Tutorial">Lucene Mini-Tutorial</a> (by funzel)<br>
+(<em>Last updated: April 2002 - tutorial</em>) <span class="anchor" id="line-64"></span></li><li><p class="line891"><a class="http" href="http://www.javaworld.com/javaworld/jw-09-2000/jw-0915-lucene.html">The Lucene search engine Powerful flexible and free</a> (by Brian Goetz)<br>
+(<em>Published September 2000 - article</em>) <span class="anchor" id="line-65"></span><span class="anchor" id="line-66"></span></li></ul><p class="line867">
+<h1 id="Interviews">Interviews</h1>
+<span class="anchor" id="line-67"></span><span class="anchor" id="line-68"></span><ul><li><p class="line891"><a class="http" href="http://www.lucidimagination.com/index.php?option=com_content&amp;task=view&amp;id=109">Interview with Lucene creator Doug Cutting</a> Podcast.  Summary: Doug talks about the creation of Lucene, Nutch and Hadoop. (<em>Published January 2009</em>) <span class="anchor" id="line-69"></span></li><li><p class="line891"><a class="http" href="http://www.lucidimagination.com/index.php?option=com_content&amp;task=view&amp;id=108">Interview with Lucene/Solr committer Chris Hostetter</a> Podcast.  Summary: Chris talks about Solr, Lucene and their usage at CNET. (<em>Published January 2009</em>) <span class="anchor" id="line-70"></span></li><li><p class="line891"><a class="http" href="http://www.lucidimagination.com/index.php?option=com_content&amp;task=view&amp;id=113">Interview with Lucene/Solr committer Ryan McKinley</a> Podcast.  Summary: Ryan discusses Solr, Lucene and geospatial searching with Lucene (<a class="nonexistent" href="/lucene-java/LocalLucene/LocalSolr">LocalLucene/LocalSolr</a>) and his usage of Lucene/Solr throughout his career. (<em>Published January 2009</em>) <span class="anchor" id="line-71"></span><span class="anchor" id="line-72"></span><span class="anchor" id="line-73"></span><span class="anchor" id="line-74"></span></li></ul><p class="line867">
+<h1 id="Papers">Papers</h1>
+<span class="anchor" id="line-75"></span><span class="anchor" id="line-76"></span><ul><li><p class="line891"><a class="http" href="http://lucene.sourceforge.net/publications.html">http://lucene.sourceforge.net/publications.html</a> Doug Cuttings papers from the old Lucene web site <span class="anchor" id="line-77"></span><span class="anchor" id="line-78"></span></li></ul><p class="line867">
+<h1 id="Presentations">Presentations</h1>
+<span class="anchor" id="line-79"></span><ul><li><p class="line891"><a class="http" href="http://people.apache.org/~buschmi/apachecon/AdvancedIndexingLuceneAtlanta07.ppt">Advanced Indexing Techniques with Apache Lucene - Payloads</a> presented by Michael Busch at <a class="http" href="http://www.us.apachecon.com/us2007/">ApacheCon U.S. 2007</a><br>
+(<em>Presented November 2007 - PDF slide show</em>) <span class="anchor" id="line-80"></span></li><li><p class="line891"><a class="http" href="http://people.apache.org/~yonik/presentations/lucene_intro.pdf">Full-Text Search with Lucene</a> presented by Yonik Seeley at <a class="http" href="http://www.eu.apachecon.com">ApacheCon Europe 2007</a>.<br>
+(<em>Presented May 2007 - PDF slide show</em>) <span class="anchor" id="line-81"></span></li><li><p class="line891"><a class="http" href="http://www.cnlp.org/presentations/slides/AdvancedLuceneEU.pdf">Advanced Lucene</a> presented by Grant Ingersoll of <a class="http" href="http://www.cnlp.org">CNLP</a> at <a class="http" href="http://www.eu.apachecon.com">ApacheCon Europe 2007</a>.  Covers term vectors, query tips and tricks and Lucene performance tuning related to indexing, searching and document retrieval.<br>
+(<em>Presented May 2007 - PDF slide show</em>) <span class="anchor" id="line-82"></span></li><li><p class="line891"><a class="http" href="http://blogs.atlassian.com/rebelutionary/downloads/tssjs2007-lucene-generic-data-indexing.pdf">Lucene: Generic Data Indexing</a> presented by Mike Cannon-Brookes, CEO, <a class="http" href="http://www.atlassian.com/">Atlassian Software Systems</a> at <a class="http" href="http://javasymposium.techtarget.com/lasvegas/index.html">TSSJS Las Vegas 2007</a>.  Covers how Atlassian use Lucene as a generic indexing framework for indexing and finding arbitrary collections of complex objects.<br>
+(<em>Presented March 2007 - PDF slide show</em>) <span class="anchor" id="line-83"></span></li><li><p class="line891"><a class="http" href="http://www.cnlp.org/apachecon2005/AdvancedLucene.ppt">Advanced Lucene</a> presented by Grant Ingersoll of the <a class="http" href="http://www.cnlp.org">Center for Natural Language Processing</a> at <a class="http" href="http://www.apachecon.com">ApacheCon 2005</a>.  Covers term vectors, span queries, using Lucene in a basic question answering system, and several Lucene case studies from <a class="http" href="http://www.cnlp.org">http://www.cnlp.org</a>.  The accompanying <a class="http" href="http://www.cnlp.org/apachecon2005">CNLP ApacheCon 2005 Information website</a> contains many working examples using term vectors and span queries. <span class="anchor" id="line-84"></span></li><li><p class="line891"><a class="http" href="http://lucene.sourceforge.net/talks/pisa/">Lucene lecture at The University of Pisa</a> (by Doug Cutting)<br>
+(<em>Presented November 2004 - lecture notes</em>) <span class="anchor" id="line-85"></span></li><li><p class="line891"><a class="http" href="http://conferences.oreillynet.com/presentations/os2003/hatcher_erik_lucene.pdf">Introducing Lucene</a> (by Erik Hatcher)<br>
+(<em>Presented at OS2003, July 2003 - PDF slide show</em>) <span class="anchor" id="line-86"></span></li><li><p class="line891"><a class="http" href="http://lucene.sourceforge.net/talks/inktomi/">The Lucene Search Engine: Inktomi Seminar</a> (by Doug Cutting)<br>
+(<em>Presented June, 2000 - seminar notes</em>) <span class="anchor" id="line-87"></span><span class="anchor" id="line-88"></span></li></ul><p class="line867">
+<h1 id="Training">Training</h1>
+<span class="anchor" id="line-89"></span><span class="anchor" id="line-90"></span><ul><li><p class="line891"><a class="http" href="http://www.lucidimagination.com/How-We-Can-Help/Training/">http://www.lucidimagination.com/How-We-Can-Help/Training/</a> - Training on Lucene created by Lucene committers and contributors (Grant Ingersoll, Erik Hatcher and the rest of the team at Lucid Imagination).   <span class="anchor" id="line-91"></span></li><li><p class="line891"><a class="http" href="http://www.lucenebootcamp.com">Lucene Boot Camp</a> - Training by Lucene committer Grant Ingersoll.  Offered exclusively at <a class="http" href="http://www.apachecon.com">ApacheCon</a>. <span class="anchor" id="line-92"></span><span class="anchor" id="line-93"></span></li></ul><p class="line867">
+<h1 id="Corpora">Corpora</h1>
+<span class="anchor" id="line-94"></span><ul><li><p class="line862">DMOZ RDF dump - <a class="http" href="http://rdf.dmoz.org/">http://rdf.dmoz.org/</a> <span class="anchor" id="line-95"></span></li><li><p class="line862">CMU newsgroups  - <a class="http" href="http://www-2.cs.cmu.edu/afs/cs.cmu.edu/project/theo-20/www/data/news20.html">http://www-2.cs.cmu.edu/afs/cs.cmu.edu/project/theo-20/www/data/news20.html</a> <span class="anchor" id="line-96"></span></li><li><p class="line862">CMU webpages  - <a class="http" href="http://www-2.cs.cmu.edu/afs/cs.cmu.edu/project/theo-20/www/data/">http://www-2.cs.cmu.edu/afs/cs.cmu.edu/project/theo-20/www/data/</a> <span class="anchor" id="line-97"></span></li><li><p class="line862">Reuters  - <a class="http" href="http://www.daviddlewis.com/resources/testcollections/reuters21578">http://www.daviddlewis.com/resources/testcollections/reuters21578</a> <span class="anchor" id="line-98"></span></li><li><p class="line862">Enron emails - <a class="http" href="http://www-2.cs.cmu.edu/~enron/">http://www-2.cs.cmu.edu/~enron/</a> <span class="anchor" id="line-99"></span></li><li><p class="line862">JRC-ACQUIS Multilingual Parallel Corpus - <a class="http" href="http://wt.jrc.it/lt/Acquis/">http://wt.jrc.it/lt/Acquis/</a> <span class="anchor" id="line-100"></span><span class="anchor" id="line-101"></span></li></ul><p class="line867">
+<h1 id="Other">Other</h1>
+<span class="anchor" id="line-102"></span><ul><li><p class="line891"><a class="http" href="http://www.java201.com/resources/browse/38-all.html">Lucene Resources</a> - Articles, Books, FAQs, Forums, Presentations, Wiki. <span class="anchor" id="line-103"></span></li><li><p class="line891"><a class="http" href="http://www.nabble.com/Web-Search-f2787.html">Lucene Search Forum</a> - hosted by <a class="http" href="http://www.nabble.com">Nabble</a> archiving all Lucene and Nutch mailing lists into a searchable archive/forum. The search is coded using Lucene. <span class="anchor" id="line-104"></span></li><li><p class="line891"><a class="http" href="http://www.lucenetutorial.com">LuceneTutorial.com</a> - Tips and tricks, sample applications, code samples, best practices. <span class="anchor" id="line-105"></span></li></ul><span class="anchor" id="bottom"></span></div><p id="pageinfo" class="info" lang="en" dir="ltr">Resources  (last edited 2010-05-03 22:31:43 by <span title="SteveRowe @ ist-h335-d03.syr.edu[128.230.84.100]"><a class="nonexistent" href="/lucene-java/SteveRowe" title="SteveRowe @ ist-h335-d03.syr.edu[128.230.84.100]">SteveRowe</a></span>)</p>
+
+<div id="pagebottom"></div>
+</div>
+
+
+<div id="footer">
+<ul class="editbar"><li><span class="disabled">Immutable Page</span></li><li class="toggleCommentsButton" style="display:none;"><a href="#" class="nbcomment" onClick="toggleComments();return false;">Comments</a></li><li><a class="nbinfo" href="/lucene-java/Resources?action=info" rel="nofollow">Info</a></li><li>
+<form class="actionsmenu" method="GET" action="/lucene-java/Resources">
+<div>
+    <label>More Actions:</label>
+    <select name="action"
+        onchange="if ((this.selectedIndex != 0) &&
+                      (this.options[this.selectedIndex].disabled == false)) {
+                this.form.submit();
+            }
+            this.selectedIndex = 0;">
+        <option value="raw">Raw Text</option>
+<option value="print">Print View</option>
+<option value="RenderAsDocbook">Render as Docbook</option>
+<option value="refresh">Delete Cache</option>
+<option value="show" disabled class="disabled">------------------------</option>
+<option value="SpellCheck">Check Spelling</option>
+<option value="LikePages">Like Pages</option>
+<option value="LocalSiteMap">Local Site Map</option>
+<option value="show" disabled class="disabled">------------------------</option>
+<option value="RenamePage" disabled class="disabled">Rename Page</option>
+<option value="CopyPage">Copy Page</option>
+<option value="DeletePage" disabled class="disabled">Delete Page</option>
+<option value="show" disabled class="disabled">------------------------</option>
+<option value="MyPages">My Pages</option>
+<option value="show" disabled class="disabled">Subscribe User</option>
+<option value="show" disabled class="disabled">------------------------</option>
+<option value="show" disabled class="disabled">Remove Spam</option>
+<option value="show" disabled class="disabled">Revert to this revision</option>
+<option value="show" disabled class="disabled">Package Pages</option>
+<option value="SyncPages">Sync Pages</option>
+<option value="show" disabled class="disabled">------------------------</option>
+<option value="Load">Load</option>
+<option value="Save">Save</option>
+    </select>
+    <input type="submit" value="Do">
+    
+</div>
+<script type="text/javascript">
+<!--// Init menu
+actionsMenuInit('More Actions:');
+//-->
+</script>
+</form>
+</li></ul>
+
+<ul id="credits">
+<li><a href="http://moinmo.in/" title="This site uses the MoinMoin Wiki software.">MoinMoin Powered</a></li><li><a href="http://moinmo.in/Python" title="MoinMoin is written in Python.">Python Powered</a></li><li><a href="http://moinmo.in/GPL" title="MoinMoin is GPL licensed.">GPL licensed</a></li><li><a href="http://validator.w3.org/check?uri=referer" title="Click here to validate this page.">Valid HTML 4.01</a></li>
+</ul>
+
+
+</div>
+</body>
+</html>
+
diff --git a/lucene/backwards/src/test/org/apache/lucene/analysis/LuceneResourcesWikiPageURLs.txt b/lucene/backwards/src/test/org/apache/lucene/analysis/LuceneResourcesWikiPageURLs.txt
new file mode 100644
index 0000000..e8ca5aa
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/analysis/LuceneResourcesWikiPageURLs.txt
@@ -0,0 +1,105 @@
+http://www.w3.org/TR/html4/strict.dtd
+http://lucene.apache.org/java/3_0_1/api/all/overview-summary.html#overview_description
+http://lucene.apache.org/java/3_0_1/gettingstarted.html
+http://lucene.grantingersoll.com
+http://www.lucidimagination.com/blog/
+http://blog.sematext.com/
+http://www.manning.com/hatcher3/hatcher3_cover150.jpg
+http://www.manning.com/hatcher3/hatcher3_cover150.jpg
+http://www.manning.com/hatcher3/hatcher3_cover150.jpg
+http://www.manning.com/hatcher3/
+http://www.amazon.com/Building-Search-Applications-Lucene-Lingpipe/dp/0615204252/
+http://www.amazon.co.jp/exec/obidos/ASIN/4774127809/503-9461699-1775907
+http://www.lucenebook.com
+http://www.amazon.com/exec/obidos/ASIN/1932394281
+Amazon.com
+http://www.amazon.de/Suchmaschinen-entwickeln-mit-Apache-Lucene/dp/3935042450
+http://www.lucidimagination.com/Community/Hear-from-the-Experts/Articles/Getting-Started-with-Lucene/
+http://www.lucidimagination.com/Community/Hear-from-the-Experts/Articles/Optimizing-Findability-in-Lucene-and-Solr/
+http://www.lucidimagination.com/Community/Hear-from-the-Experts/Articles/Debugging-Relevance-Issues-in-Search/
+http://www.lucidimagination.com/Community/Hear-from-the-Experts/Articles/Scaling-Lucene-and-Solr/
+http://www.lucidimagination.com/Community/Hear-from-the-Experts/Articles/Introduction-to-Apache-Lucene-and-Solr/
+http://cephas.net/blog/2008/03/30/how-morelikethis-works-in-lucene/
+http://schmidt.devlib.org/software/lucene-wikipedia.html
+http://marceloochoa.blogspot.com/2007/09/running-lucene-inside-your-oracle-jvm.html
+http://www.onjava.com/pub/a/onjava/2007/05/24/using-the-lucene-query-parser-without-lucene.html
+http://www.javaworld.com/javaworld/jw-09-2006/jw-0925-lucene.html
+http://www-128.ibm.com/developerworks/java/library/wa-lucene2/index.html?ca=drs-
+http://www.freesearch.pe.kr/tag/Lucene
+http://www-128.ibm.com/developerworks/java/library/wa-lucene/index.html
+http://www.onjava.com/pub/a/onjava/2006/01/18/using-lucene-to-search-java-source.html
+http://www.jroller.com/page/wakaleo/?anchor=lucene_a_tutorial_introduction_to
+http://blog.dev.sf.net/index.php?/archives/10-Behind-the-Scenes-of-the-SourceForge.net-Search-System.html
+SourceForge.net
+http://today.java.net/pub/a/today/2005/08/09/didyoumean.html
+http://www.developer.com/java/other/article.php/3490471
+http://www.theserverside.com/tt/articles/article.tss?l=ILoveLucene
+http://javaboutique.internet.com/tutorials/HTMLParser/article.html
+http://bilgidata.com/localhost/bilgidata/yazi.jsp@dosya=a_lucene.xml.html
+http://www.chedong.com/tech/lucene.html
+http://javatechniques.com/public/java/docs/basics/lucene-memory-search.html
+http://www.javaranch.com/newsletter/200404/Lucene.html
+http://www.darksleep.com/lucene/
+http://www-igm.univ-mlv.fr/~dr/XPOSE2003/lucene/articleLucene.html
+http://today.java.net/pub/a/today/2003/11/07/QueryParserRules.html
+http://builder.com.com/5100-6389-5054799.html
+http://today.java.net/pub/a/today/2003/07/30/LuceneIntro.html
+http://www-106.ibm.com/developerworks/library/j-lucene/
+http://www.xml.com/pub/a/ws/2003/05/13/email.html
+http://www.onjava.com/pub/a/onjava/2003/03/05/lucene.html
+http://www.onjava.com/pub/a/onjava/2003/01/15/lucene.html
+http://javangelist.snipsnap.org/space/Lucene-Mini-Tutorial
+http://www.javaworld.com/javaworld/jw-09-2000/jw-0915-lucene.html
+http://www.lucidimagination.com/index.php?option=com_content&amp;task=view&amp;id=109
+http://www.lucidimagination.com/index.php?option=com_content&amp;task=view&amp;id=108
+http://www.lucidimagination.com/index.php?option=com_content&amp;task=view&amp;id=113
+http://lucene.sourceforge.net/publications.html
+http://lucene.sourceforge.net/publications.html
+http://people.apache.org/~buschmi/apachecon/AdvancedIndexingLuceneAtlanta07.ppt
+http://www.us.apachecon.com/us2007/
+http://people.apache.org/~yonik/presentations/lucene_intro.pdf
+http://www.eu.apachecon.com
+http://www.cnlp.org/presentations/slides/AdvancedLuceneEU.pdf
+http://www.cnlp.org
+http://www.eu.apachecon.com
+http://blogs.atlassian.com/rebelutionary/downloads/tssjs2007-lucene-generic-data-indexing.pdf
+http://www.atlassian.com/
+http://javasymposium.techtarget.com/lasvegas/index.html
+http://www.cnlp.org/apachecon2005/AdvancedLucene.ppt
+http://www.cnlp.org
+http://www.apachecon.com
+http://www.cnlp.org
+http://www.cnlp.org
+http://www.cnlp.org/apachecon2005
+http://lucene.sourceforge.net/talks/pisa/
+http://conferences.oreillynet.com/presentations/os2003/hatcher_erik_lucene.pdf
+http://lucene.sourceforge.net/talks/inktomi/
+http://www.lucidimagination.com/How-We-Can-Help/Training/
+http://www.lucidimagination.com/How-We-Can-Help/Training/
+http://www.lucenebootcamp.com
+http://www.apachecon.com
+http://rdf.dmoz.org/
+http://rdf.dmoz.org/
+http://www-2.cs.cmu.edu/afs/cs.cmu.edu/project/theo-20/www/data/news20.html
+http://www-2.cs.cmu.edu/afs/cs.cmu.edu/project/theo-20/www/data/news20.html
+http://www-2.cs.cmu.edu/afs/cs.cmu.edu/project/theo-20/www/data/
+http://www-2.cs.cmu.edu/afs/cs.cmu.edu/project/theo-20/www/data/
+http://www.daviddlewis.com/resources/testcollections/reuters21578
+http://www.daviddlewis.com/resources/testcollections/reuters21578
+http://www-2.cs.cmu.edu/~enron/
+http://www-2.cs.cmu.edu/~enron/
+http://wt.jrc.it/lt/Acquis/
+http://wt.jrc.it/lt/Acquis/
+http://www.java201.com/resources/browse/38-all.html
+http://www.nabble.com/Web-Search-f2787.html
+http://www.nabble.com
+http://www.lucenetutorial.com
+LuceneTutorial.com
+ist-h335-d03.syr.edu
+128.230.84.100
+ist-h335-d03.syr.edu
+128.230.84.100
+http://moinmo.in/
+http://moinmo.in/Python
+http://moinmo.in/GPL
+http://validator.w3.org/check?uri=referer
diff --git a/lucene/backwards/src/test/org/apache/lucene/analysis/TestASCIIFoldingFilter.java b/lucene/backwards/src/test/org/apache/lucene/analysis/TestASCIIFoldingFilter.java
new file mode 100644
index 0000000..ca90ad3
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/analysis/TestASCIIFoldingFilter.java
@@ -0,0 +1,1907 @@
+package org.apache.lucene.analysis;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.analysis.MockTokenizer;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import java.io.StringReader;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Iterator;
+
+public class TestASCIIFoldingFilter extends BaseTokenStreamTestCase {
+
+  // testLatin1Accents() is a copy of TestLatin1AccentFilter.testU().
+  public void testLatin1Accents() throws Exception {
+    TokenStream stream = new MockTokenizer(new StringReader
+      ("Des mot clés À LA CHAÎNE À Á Â Ã Ä Å Æ Ç È É Ê Ë Ì Í Î Ï IJ Ð Ñ"
+      +" Ò Ó Ô Õ Ö Ø Œ Þ Ù Ú Û Ü Ý Ÿ à á â ã ä å æ ç è é ê ë ì í î ï ij"
+      +" ð ñ ò ó ô õ ö ø œ ß þ ù ú û ü ý ÿ fi fl"), MockTokenizer.WHITESPACE, false);
+    ASCIIFoldingFilter filter = new ASCIIFoldingFilter(stream);
+
+    CharTermAttribute termAtt = filter.getAttribute(CharTermAttribute.class);
+    filter.reset();
+    assertTermEquals("Des", filter, termAtt);
+    assertTermEquals("mot", filter, termAtt);
+    assertTermEquals("cles", filter, termAtt);
+    assertTermEquals("A", filter, termAtt);
+    assertTermEquals("LA", filter, termAtt);
+    assertTermEquals("CHAINE", filter, termAtt);
+    assertTermEquals("A", filter, termAtt);
+    assertTermEquals("A", filter, termAtt);
+    assertTermEquals("A", filter, termAtt);
+    assertTermEquals("A", filter, termAtt);
+    assertTermEquals("A", filter, termAtt);
+    assertTermEquals("A", filter, termAtt);
+    assertTermEquals("AE", filter, termAtt);
+    assertTermEquals("C", filter, termAtt);
+    assertTermEquals("E", filter, termAtt);
+    assertTermEquals("E", filter, termAtt);
+    assertTermEquals("E", filter, termAtt);
+    assertTermEquals("E", filter, termAtt);
+    assertTermEquals("I", filter, termAtt);
+    assertTermEquals("I", filter, termAtt);
+    assertTermEquals("I", filter, termAtt);
+    assertTermEquals("I", filter, termAtt);
+    assertTermEquals("IJ", filter, termAtt);
+    assertTermEquals("D", filter, termAtt);
+    assertTermEquals("N", filter, termAtt);
+    assertTermEquals("O", filter, termAtt);
+    assertTermEquals("O", filter, termAtt);
+    assertTermEquals("O", filter, termAtt);
+    assertTermEquals("O", filter, termAtt);
+    assertTermEquals("O", filter, termAtt);
+    assertTermEquals("O", filter, termAtt);
+    assertTermEquals("OE", filter, termAtt);
+    assertTermEquals("TH", filter, termAtt);
+    assertTermEquals("U", filter, termAtt);
+    assertTermEquals("U", filter, termAtt);
+    assertTermEquals("U", filter, termAtt);
+    assertTermEquals("U", filter, termAtt);
+    assertTermEquals("Y", filter, termAtt);
+    assertTermEquals("Y", filter, termAtt);
+    assertTermEquals("a", filter, termAtt);
+    assertTermEquals("a", filter, termAtt);
+    assertTermEquals("a", filter, termAtt);
+    assertTermEquals("a", filter, termAtt);
+    assertTermEquals("a", filter, termAtt);
+    assertTermEquals("a", filter, termAtt);
+    assertTermEquals("ae", filter, termAtt);
+    assertTermEquals("c", filter, termAtt);
+    assertTermEquals("e", filter, termAtt);
+    assertTermEquals("e", filter, termAtt);
+    assertTermEquals("e", filter, termAtt);
+    assertTermEquals("e", filter, termAtt);
+    assertTermEquals("i", filter, termAtt);
+    assertTermEquals("i", filter, termAtt);
+    assertTermEquals("i", filter, termAtt);
+    assertTermEquals("i", filter, termAtt);
+    assertTermEquals("ij", filter, termAtt);
+    assertTermEquals("d", filter, termAtt);
+    assertTermEquals("n", filter, termAtt);
+    assertTermEquals("o", filter, termAtt);
+    assertTermEquals("o", filter, termAtt);
+    assertTermEquals("o", filter, termAtt);
+    assertTermEquals("o", filter, termAtt);
+    assertTermEquals("o", filter, termAtt);
+    assertTermEquals("o", filter, termAtt);
+    assertTermEquals("oe", filter, termAtt);
+    assertTermEquals("ss", filter, termAtt);
+    assertTermEquals("th", filter, termAtt);
+    assertTermEquals("u", filter, termAtt);
+    assertTermEquals("u", filter, termAtt);
+    assertTermEquals("u", filter, termAtt);
+    assertTermEquals("u", filter, termAtt);
+    assertTermEquals("y", filter, termAtt);
+    assertTermEquals("y", filter, termAtt);
+    assertTermEquals("fi", filter, termAtt);
+    assertTermEquals("fl", filter, termAtt);
+    assertFalse(filter.incrementToken());
+  }
+
+
+  // The following Perl script generated the foldings[] array automatically
+  // from ASCIIFoldingFilter.java:
+  //
+  //    ============== begin get.test.cases.pl ==============
+  //
+  //    use strict;
+  //    use warnings;
+  //
+  //    my $file = "ASCIIFoldingFilter.java";
+  //    my $output = "testcases.txt";
+  //    my %codes = ();
+  //    my $folded = '';
+  //
+  //    open IN, "<:utf8", $file or die "Error opening input file '$file': $!";
+  //    open OUT, ">:utf8", $output or die "Error opening output file '$output': $!";
+  //
+  //    while (my $line = <IN>) {
+  //      chomp($line);
+  //      # case '\u0133': // <char> <maybe URL> [ description ]
+  //      if ($line =~ /case\s+'\\u(....)':.*\[([^\]]+)\]/) {
+  //        my $code = $1;
+  //        my $desc = $2;
+  //        $codes{$code} = $desc;
+  //      }
+  //      # output[outputPos++] = 'A';
+  //      elsif ($line =~ /output\[outputPos\+\+\] = '(.+)';/) {
+  //        my $output_char = $1;
+  //        $folded .= $output_char;
+  //      }
+  //      elsif ($line =~ /break;/ && length($folded) > 0) {
+  //        my $first = 1;
+  //        for my $code (sort { hex($a) <=> hex($b) } keys %codes) {
+  //          my $desc = $codes{$code};
+  //          print OUT '      ';
+  //          print OUT '+ ' if (not $first);
+  //          $first = 0;
+  //          print OUT '"', chr(hex($code)), qq!"  // U+$code: $desc\n!;
+  //        }
+  //        print OUT qq!      ,"$folded", // Folded result\n\n!;
+  //        %codes = ();
+  //        $folded = '';
+  //      }
+  //    }
+  //    close OUT;
+  //
+  //    ============== end get.test.cases.pl ==============
+  //
+  public void testAllFoldings() throws Exception {
+    // Alternating strings of:
+    //   1. All non-ASCII characters to be folded, concatenated together as a
+    //      single string.
+    //   2. The string of ASCII characters to which each of the above
+    //      characters should be folded.
+    String[] foldings = {
+      "À"  // U+00C0: LATIN CAPITAL LETTER A WITH GRAVE
+      + "Á"  // U+00C1: LATIN CAPITAL LETTER A WITH ACUTE
+      + "Â"  // U+00C2: LATIN CAPITAL LETTER A WITH CIRCUMFLEX
+      + "Ã"  // U+00C3: LATIN CAPITAL LETTER A WITH TILDE
+      + "Ä"  // U+00C4: LATIN CAPITAL LETTER A WITH DIAERESIS
+      + "Å"  // U+00C5: LATIN CAPITAL LETTER A WITH RING ABOVE
+      + "Ā"  // U+0100: LATIN CAPITAL LETTER A WITH MACRON
+      + "Ă"  // U+0102: LATIN CAPITAL LETTER A WITH BREVE
+      + "Ą"  // U+0104: LATIN CAPITAL LETTER A WITH OGONEK
+      + "Ə"  // U+018F: LATIN CAPITAL LETTER SCHWA
+      + "Ǎ"  // U+01CD: LATIN CAPITAL LETTER A WITH CARON
+      + "Ǟ"  // U+01DE: LATIN CAPITAL LETTER A WITH DIAERESIS AND MACRON
+      + "Ǡ"  // U+01E0: LATIN CAPITAL LETTER A WITH DOT ABOVE AND MACRON
+      + "Ǻ"  // U+01FA: LATIN CAPITAL LETTER A WITH RING ABOVE AND ACUTE
+      + "Ȁ"  // U+0200: LATIN CAPITAL LETTER A WITH DOUBLE GRAVE
+      + "Ȃ"  // U+0202: LATIN CAPITAL LETTER A WITH INVERTED BREVE
+      + "Ȧ"  // U+0226: LATIN CAPITAL LETTER A WITH DOT ABOVE
+      + "Ⱥ"  // U+023A: LATIN CAPITAL LETTER A WITH STROKE
+      + "ᴀ"  // U+1D00: LATIN LETTER SMALL CAPITAL A
+      + "Ḁ"  // U+1E00: LATIN CAPITAL LETTER A WITH RING BELOW
+      + "Ạ"  // U+1EA0: LATIN CAPITAL LETTER A WITH DOT BELOW
+      + "Ả"  // U+1EA2: LATIN CAPITAL LETTER A WITH HOOK ABOVE
+      + "Ấ"  // U+1EA4: LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND ACUTE
+      + "Ầ"  // U+1EA6: LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND GRAVE
+      + "Ẩ"  // U+1EA8: LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND HOOK ABOVE
+      + "Ẫ"  // U+1EAA: LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND TILDE
+      + "Ậ"  // U+1EAC: LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND DOT BELOW
+      + "Ắ"  // U+1EAE: LATIN CAPITAL LETTER A WITH BREVE AND ACUTE
+      + "Ằ"  // U+1EB0: LATIN CAPITAL LETTER A WITH BREVE AND GRAVE
+      + "Ẳ"  // U+1EB2: LATIN CAPITAL LETTER A WITH BREVE AND HOOK ABOVE
+      + "Ẵ"  // U+1EB4: LATIN CAPITAL LETTER A WITH BREVE AND TILDE
+      + "Ặ"  // U+1EB6: LATIN CAPITAL LETTER A WITH BREVE AND DOT BELOW
+      + "Ⓐ"  // U+24B6: CIRCLED LATIN CAPITAL LETTER A
+      + "A"  // U+FF21: FULLWIDTH LATIN CAPITAL LETTER A
+      ,"A", // Folded result
+
+       "à"  // U+00E0: LATIN SMALL LETTER A WITH GRAVE
+       + "á"  // U+00E1: LATIN SMALL LETTER A WITH ACUTE
+       + "â"  // U+00E2: LATIN SMALL LETTER A WITH CIRCUMFLEX
+       + "ã"  // U+00E3: LATIN SMALL LETTER A WITH TILDE
+       + "ä"  // U+00E4: LATIN SMALL LETTER A WITH DIAERESIS
+       + "å"  // U+00E5: LATIN SMALL LETTER A WITH RING ABOVE
+       + "ā"  // U+0101: LATIN SMALL LETTER A WITH MACRON
+       + "ă"  // U+0103: LATIN SMALL LETTER A WITH BREVE
+       + "ą"  // U+0105: LATIN SMALL LETTER A WITH OGONEK
+       + "ǎ"  // U+01CE: LATIN SMALL LETTER A WITH CARON
+       + "ǟ"  // U+01DF: LATIN SMALL LETTER A WITH DIAERESIS AND MACRON
+       + "ǡ"  // U+01E1: LATIN SMALL LETTER A WITH DOT ABOVE AND MACRON
+       + "ǻ"  // U+01FB: LATIN SMALL LETTER A WITH RING ABOVE AND ACUTE
+       + "ȁ"  // U+0201: LATIN SMALL LETTER A WITH DOUBLE GRAVE
+       + "ȃ"  // U+0203: LATIN SMALL LETTER A WITH INVERTED BREVE
+       + "ȧ"  // U+0227: LATIN SMALL LETTER A WITH DOT ABOVE
+       + "ɐ"  // U+0250: LATIN SMALL LETTER TURNED A
+       + "ə"  // U+0259: LATIN SMALL LETTER SCHWA
+       + "ɚ"  // U+025A: LATIN SMALL LETTER SCHWA WITH HOOK
+       + "ᶏ"  // U+1D8F: LATIN SMALL LETTER A WITH RETROFLEX HOOK
+       + "ḁ"  // U+1E01: LATIN SMALL LETTER A WITH RING BELOW
+       + "ᶕ"  // U+1D95: LATIN SMALL LETTER SCHWA WITH RETROFLEX HOOK
+       + "ẚ"  // U+1E9A: LATIN SMALL LETTER A WITH RIGHT HALF RING
+       + "ạ"  // U+1EA1: LATIN SMALL LETTER A WITH DOT BELOW
+       + "ả"  // U+1EA3: LATIN SMALL LETTER A WITH HOOK ABOVE
+       + "ấ"  // U+1EA5: LATIN SMALL LETTER A WITH CIRCUMFLEX AND ACUTE
+       + "ầ"  // U+1EA7: LATIN SMALL LETTER A WITH CIRCUMFLEX AND GRAVE
+       + "ẩ"  // U+1EA9: LATIN SMALL LETTER A WITH CIRCUMFLEX AND HOOK ABOVE
+       + "ẫ"  // U+1EAB: LATIN SMALL LETTER A WITH CIRCUMFLEX AND TILDE
+       + "ậ"  // U+1EAD: LATIN SMALL LETTER A WITH CIRCUMFLEX AND DOT BELOW
+       + "ắ"  // U+1EAF: LATIN SMALL LETTER A WITH BREVE AND ACUTE
+       + "ằ"  // U+1EB1: LATIN SMALL LETTER A WITH BREVE AND GRAVE
+       + "ẳ"  // U+1EB3: LATIN SMALL LETTER A WITH BREVE AND HOOK ABOVE
+       + "ẵ"  // U+1EB5: LATIN SMALL LETTER A WITH BREVE AND TILDE
+       + "ặ"  // U+1EB7: LATIN SMALL LETTER A WITH BREVE AND DOT BELOW
+       + "ₐ"  // U+2090: LATIN SUBSCRIPT SMALL LETTER A
+       + "ₔ"  // U+2094: LATIN SUBSCRIPT SMALL LETTER SCHWA
+       + "ⓐ"  // U+24D0: CIRCLED LATIN SMALL LETTER A
+       + "ⱥ"  // U+2C65: LATIN SMALL LETTER A WITH STROKE
+       + "Ɐ"  // U+2C6F: LATIN CAPITAL LETTER TURNED A
+       + "a"  // U+FF41: FULLWIDTH LATIN SMALL LETTER A
+      ,"a", // Folded result
+
+       "Ꜳ"  // U+A732: LATIN CAPITAL LETTER AA
+      ,"AA", // Folded result
+
+       "Æ"  // U+00C6: LATIN CAPITAL LETTER AE
+       + "Ǣ"  // U+01E2: LATIN CAPITAL LETTER AE WITH MACRON
+       + "Ǽ"  // U+01FC: LATIN CAPITAL LETTER AE WITH ACUTE
+       + "ᴁ"  // U+1D01: LATIN LETTER SMALL CAPITAL AE
+      ,"AE", // Folded result
+
+       "Ꜵ"  // U+A734: LATIN CAPITAL LETTER AO
+      ,"AO", // Folded result
+
+       "Ꜷ"  // U+A736: LATIN CAPITAL LETTER AU
+      ,"AU", // Folded result
+
+       "Ꜹ"  // U+A738: LATIN CAPITAL LETTER AV
+       + "Ꜻ"  // U+A73A: LATIN CAPITAL LETTER AV WITH HORIZONTAL BAR
+      ,"AV", // Folded result
+
+       "Ꜽ"  // U+A73C: LATIN CAPITAL LETTER AY
+      ,"AY", // Folded result
+
+       "⒜"  // U+249C: PARENTHESIZED LATIN SMALL LETTER A
+      ,"(a)", // Folded result
+
+       "ꜳ"  // U+A733: LATIN SMALL LETTER AA
+      ,"aa", // Folded result
+
+       "æ"  // U+00E6: LATIN SMALL LETTER AE
+       + "ǣ"  // U+01E3: LATIN SMALL LETTER AE WITH MACRON
+       + "ǽ"  // U+01FD: LATIN SMALL LETTER AE WITH ACUTE
+       + "ᴂ"  // U+1D02: LATIN SMALL LETTER TURNED AE
+      ,"ae", // Folded result
+
+       "ꜵ"  // U+A735: LATIN SMALL LETTER AO
+      ,"ao", // Folded result
+
+       "ꜷ"  // U+A737: LATIN SMALL LETTER AU
+      ,"au", // Folded result
+
+       "ꜹ"  // U+A739: LATIN SMALL LETTER AV
+       + "ꜻ"  // U+A73B: LATIN SMALL LETTER AV WITH HORIZONTAL BAR
+      ,"av", // Folded result
+
+       "ꜽ"  // U+A73D: LATIN SMALL LETTER AY
+      ,"ay", // Folded result
+
+       "Ɓ"  // U+0181: LATIN CAPITAL LETTER B WITH HOOK
+       + "Ƃ"  // U+0182: LATIN CAPITAL LETTER B WITH TOPBAR
+       + "Ƀ"  // U+0243: LATIN CAPITAL LETTER B WITH STROKE
+       + "ʙ"  // U+0299: LATIN LETTER SMALL CAPITAL B
+       + "ᴃ"  // U+1D03: LATIN LETTER SMALL CAPITAL BARRED B
+       + "Ḃ"  // U+1E02: LATIN CAPITAL LETTER B WITH DOT ABOVE
+       + "Ḅ"  // U+1E04: LATIN CAPITAL LETTER B WITH DOT BELOW
+       + "Ḇ"  // U+1E06: LATIN CAPITAL LETTER B WITH LINE BELOW
+       + "Ⓑ"  // U+24B7: CIRCLED LATIN CAPITAL LETTER B
+       + "B"  // U+FF22: FULLWIDTH LATIN CAPITAL LETTER B
+      ,"B", // Folded result
+
+       "ƀ"  // U+0180: LATIN SMALL LETTER B WITH STROKE
+       + "ƃ"  // U+0183: LATIN SMALL LETTER B WITH TOPBAR
+       + "ɓ"  // U+0253: LATIN SMALL LETTER B WITH HOOK
+       + "ᵬ"  // U+1D6C: LATIN SMALL LETTER B WITH MIDDLE TILDE
+       + "ᶀ"  // U+1D80: LATIN SMALL LETTER B WITH PALATAL HOOK
+       + "ḃ"  // U+1E03: LATIN SMALL LETTER B WITH DOT ABOVE
+       + "ḅ"  // U+1E05: LATIN SMALL LETTER B WITH DOT BELOW
+       + "ḇ"  // U+1E07: LATIN SMALL LETTER B WITH LINE BELOW
+       + "ⓑ"  // U+24D1: CIRCLED LATIN SMALL LETTER B
+       + "b"  // U+FF42: FULLWIDTH LATIN SMALL LETTER B
+      ,"b", // Folded result
+
+       "⒝"  // U+249D: PARENTHESIZED LATIN SMALL LETTER B
+      ,"(b)", // Folded result
+
+       "Ç"  // U+00C7: LATIN CAPITAL LETTER C WITH CEDILLA
+       + "Ć"  // U+0106: LATIN CAPITAL LETTER C WITH ACUTE
+       + "Ĉ"  // U+0108: LATIN CAPITAL LETTER C WITH CIRCUMFLEX
+       + "Ċ"  // U+010A: LATIN CAPITAL LETTER C WITH DOT ABOVE
+       + "Č"  // U+010C: LATIN CAPITAL LETTER C WITH CARON
+       + "Ƈ"  // U+0187: LATIN CAPITAL LETTER C WITH HOOK
+       + "Ȼ"  // U+023B: LATIN CAPITAL LETTER C WITH STROKE
+       + "ʗ"  // U+0297: LATIN LETTER STRETCHED C
+       + "ᴄ"  // U+1D04: LATIN LETTER SMALL CAPITAL C
+       + "Ḉ"  // U+1E08: LATIN CAPITAL LETTER C WITH CEDILLA AND ACUTE
+       + "Ⓒ"  // U+24B8: CIRCLED LATIN CAPITAL LETTER C
+       + "C"  // U+FF23: FULLWIDTH LATIN CAPITAL LETTER C
+      ,"C", // Folded result
+
+       "ç"  // U+00E7: LATIN SMALL LETTER C WITH CEDILLA
+       + "ć"  // U+0107: LATIN SMALL LETTER C WITH ACUTE
+       + "ĉ"  // U+0109: LATIN SMALL LETTER C WITH CIRCUMFLEX
+       + "ċ"  // U+010B: LATIN SMALL LETTER C WITH DOT ABOVE
+       + "č"  // U+010D: LATIN SMALL LETTER C WITH CARON
+       + "ƈ"  // U+0188: LATIN SMALL LETTER C WITH HOOK
+       + "ȼ"  // U+023C: LATIN SMALL LETTER C WITH STROKE
+       + "ɕ"  // U+0255: LATIN SMALL LETTER C WITH CURL
+       + "ḉ"  // U+1E09: LATIN SMALL LETTER C WITH CEDILLA AND ACUTE
+       + "ↄ"  // U+2184: LATIN SMALL LETTER REVERSED C
+       + "ⓒ"  // U+24D2: CIRCLED LATIN SMALL LETTER C
+       + "Ꜿ"  // U+A73E: LATIN CAPITAL LETTER REVERSED C WITH DOT
+       + "ꜿ"  // U+A73F: LATIN SMALL LETTER REVERSED C WITH DOT
+       + "c"  // U+FF43: FULLWIDTH LATIN SMALL LETTER C
+      ,"c", // Folded result
+
+       "⒞"  // U+249E: PARENTHESIZED LATIN SMALL LETTER C
+      ,"(c)", // Folded result
+
+       "Ð"  // U+00D0: LATIN CAPITAL LETTER ETH
+       + "Ď"  // U+010E: LATIN CAPITAL LETTER D WITH CARON
+       + "Đ"  // U+0110: LATIN CAPITAL LETTER D WITH STROKE
+       + "Ɖ"  // U+0189: LATIN CAPITAL LETTER AFRICAN D
+       + "Ɗ"  // U+018A: LATIN CAPITAL LETTER D WITH HOOK
+       + "Ƌ"  // U+018B: LATIN CAPITAL LETTER D WITH TOPBAR
+       + "ᴅ"  // U+1D05: LATIN LETTER SMALL CAPITAL D
+       + "ᴆ"  // U+1D06: LATIN LETTER SMALL CAPITAL ETH
+       + "Ḋ"  // U+1E0A: LATIN CAPITAL LETTER D WITH DOT ABOVE
+       + "Ḍ"  // U+1E0C: LATIN CAPITAL LETTER D WITH DOT BELOW
+       + "Ḏ"  // U+1E0E: LATIN CAPITAL LETTER D WITH LINE BELOW
+       + "Ḑ"  // U+1E10: LATIN CAPITAL LETTER D WITH CEDILLA
+       + "Ḓ"  // U+1E12: LATIN CAPITAL LETTER D WITH CIRCUMFLEX BELOW
+       + "Ⓓ"  // U+24B9: CIRCLED LATIN CAPITAL LETTER D
+       + "Ꝺ"  // U+A779: LATIN CAPITAL LETTER INSULAR D
+       + "D"  // U+FF24: FULLWIDTH LATIN CAPITAL LETTER D
+      ,"D", // Folded result
+
+       "ð"  // U+00F0: LATIN SMALL LETTER ETH
+       + "ď"  // U+010F: LATIN SMALL LETTER D WITH CARON
+       + "đ"  // U+0111: LATIN SMALL LETTER D WITH STROKE
+       + "ƌ"  // U+018C: LATIN SMALL LETTER D WITH TOPBAR
+       + "ȡ"  // U+0221: LATIN SMALL LETTER D WITH CURL
+       + "ɖ"  // U+0256: LATIN SMALL LETTER D WITH TAIL
+       + "ɗ"  // U+0257: LATIN SMALL LETTER D WITH HOOK
+       + "ᵭ"  // U+1D6D: LATIN SMALL LETTER D WITH MIDDLE TILDE
+       + "ᶁ"  // U+1D81: LATIN SMALL LETTER D WITH PALATAL HOOK
+       + "ᶑ"  // U+1D91: LATIN SMALL LETTER D WITH HOOK AND TAIL
+       + "ḋ"  // U+1E0B: LATIN SMALL LETTER D WITH DOT ABOVE
+       + "ḍ"  // U+1E0D: LATIN SMALL LETTER D WITH DOT BELOW
+       + "ḏ"  // U+1E0F: LATIN SMALL LETTER D WITH LINE BELOW
+       + "ḑ"  // U+1E11: LATIN SMALL LETTER D WITH CEDILLA
+       + "ḓ"  // U+1E13: LATIN SMALL LETTER D WITH CIRCUMFLEX BELOW
+       + "ⓓ"  // U+24D3: CIRCLED LATIN SMALL LETTER D
+       + "ꝺ"  // U+A77A: LATIN SMALL LETTER INSULAR D
+       + "d"  // U+FF44: FULLWIDTH LATIN SMALL LETTER D
+      ,"d", // Folded result
+
+       "DŽ"  // U+01C4: LATIN CAPITAL LETTER DZ WITH CARON
+       + "DZ"  // U+01F1: LATIN CAPITAL LETTER DZ
+      ,"DZ", // Folded result
+
+       "Dž"  // U+01C5: LATIN CAPITAL LETTER D WITH SMALL LETTER Z WITH CARON
+       + "Dz"  // U+01F2: LATIN CAPITAL LETTER D WITH SMALL LETTER Z
+      ,"Dz", // Folded result
+
+       "⒟"  // U+249F: PARENTHESIZED LATIN SMALL LETTER D
+      ,"(d)", // Folded result
+
+       "ȸ"  // U+0238: LATIN SMALL LETTER DB DIGRAPH
+      ,"db", // Folded result
+
+       "dž"  // U+01C6: LATIN SMALL LETTER DZ WITH CARON
+       + "dz"  // U+01F3: LATIN SMALL LETTER DZ
+       + "ʣ"  // U+02A3: LATIN SMALL LETTER DZ DIGRAPH
+       + "ʥ"  // U+02A5: LATIN SMALL LETTER DZ DIGRAPH WITH CURL
+      ,"dz", // Folded result
+
+       "È"  // U+00C8: LATIN CAPITAL LETTER E WITH GRAVE
+       + "É"  // U+00C9: LATIN CAPITAL LETTER E WITH ACUTE
+       + "Ê"  // U+00CA: LATIN CAPITAL LETTER E WITH CIRCUMFLEX
+       + "Ë"  // U+00CB: LATIN CAPITAL LETTER E WITH DIAERESIS
+       + "Ē"  // U+0112: LATIN CAPITAL LETTER E WITH MACRON
+       + "Ĕ"  // U+0114: LATIN CAPITAL LETTER E WITH BREVE
+       + "Ė"  // U+0116: LATIN CAPITAL LETTER E WITH DOT ABOVE
+       + "Ę"  // U+0118: LATIN CAPITAL LETTER E WITH OGONEK
+       + "Ě"  // U+011A: LATIN CAPITAL LETTER E WITH CARON
+       + "Ǝ"  // U+018E: LATIN CAPITAL LETTER REVERSED E
+       + "Ɛ"  // U+0190: LATIN CAPITAL LETTER OPEN E
+       + "Ȅ"  // U+0204: LATIN CAPITAL LETTER E WITH DOUBLE GRAVE
+       + "Ȇ"  // U+0206: LATIN CAPITAL LETTER E WITH INVERTED BREVE
+       + "Ȩ"  // U+0228: LATIN CAPITAL LETTER E WITH CEDILLA
+       + "Ɇ"  // U+0246: LATIN CAPITAL LETTER E WITH STROKE
+       + "ᴇ"  // U+1D07: LATIN LETTER SMALL CAPITAL E
+       + "Ḕ"  // U+1E14: LATIN CAPITAL LETTER E WITH MACRON AND GRAVE
+       + "Ḗ"  // U+1E16: LATIN CAPITAL LETTER E WITH MACRON AND ACUTE
+       + "Ḙ"  // U+1E18: LATIN CAPITAL LETTER E WITH CIRCUMFLEX BELOW
+       + "Ḛ"  // U+1E1A: LATIN CAPITAL LETTER E WITH TILDE BELOW
+       + "Ḝ"  // U+1E1C: LATIN CAPITAL LETTER E WITH CEDILLA AND BREVE
+       + "Ẹ"  // U+1EB8: LATIN CAPITAL LETTER E WITH DOT BELOW
+       + "Ẻ"  // U+1EBA: LATIN CAPITAL LETTER E WITH HOOK ABOVE
+       + "Ẽ"  // U+1EBC: LATIN CAPITAL LETTER E WITH TILDE
+       + "Ế"  // U+1EBE: LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND ACUTE
+       + "Ề"  // U+1EC0: LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND GRAVE
+       + "Ể"  // U+1EC2: LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND HOOK ABOVE
+       + "Ễ"  // U+1EC4: LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND TILDE
+       + "Ệ"  // U+1EC6: LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND DOT BELOW
+       + "Ⓔ"  // U+24BA: CIRCLED LATIN CAPITAL LETTER E
+       + "ⱻ"  // U+2C7B: LATIN LETTER SMALL CAPITAL TURNED E
+       + "E"  // U+FF25: FULLWIDTH LATIN CAPITAL LETTER E
+      ,"E", // Folded result
+
+       "è"  // U+00E8: LATIN SMALL LETTER E WITH GRAVE
+       + "é"  // U+00E9: LATIN SMALL LETTER E WITH ACUTE
+       + "ê"  // U+00EA: LATIN SMALL LETTER E WITH CIRCUMFLEX
+       + "ë"  // U+00EB: LATIN SMALL LETTER E WITH DIAERESIS
+       + "ē"  // U+0113: LATIN SMALL LETTER E WITH MACRON
+       + "ĕ"  // U+0115: LATIN SMALL LETTER E WITH BREVE
+       + "ė"  // U+0117: LATIN SMALL LETTER E WITH DOT ABOVE
+       + "ę"  // U+0119: LATIN SMALL LETTER E WITH OGONEK
+       + "ě"  // U+011B: LATIN SMALL LETTER E WITH CARON
+       + "ǝ"  // U+01DD: LATIN SMALL LETTER TURNED E
+       + "ȅ"  // U+0205: LATIN SMALL LETTER E WITH DOUBLE GRAVE
+       + "ȇ"  // U+0207: LATIN SMALL LETTER E WITH INVERTED BREVE
+       + "ȩ"  // U+0229: LATIN SMALL LETTER E WITH CEDILLA
+       + "ɇ"  // U+0247: LATIN SMALL LETTER E WITH STROKE
+       + "ɘ"  // U+0258: LATIN SMALL LETTER REVERSED E
+       + "ɛ"  // U+025B: LATIN SMALL LETTER OPEN E
+       + "ɜ"  // U+025C: LATIN SMALL LETTER REVERSED OPEN E
+       + "ɝ"  // U+025D: LATIN SMALL LETTER REVERSED OPEN E WITH HOOK
+       + "ɞ"  // U+025E: LATIN SMALL LETTER CLOSED REVERSED OPEN E
+       + "ʚ"  // U+029A: LATIN SMALL LETTER CLOSED OPEN E
+       + "ᴈ"  // U+1D08: LATIN SMALL LETTER TURNED OPEN E
+       + "ᶒ"  // U+1D92: LATIN SMALL LETTER E WITH RETROFLEX HOOK
+       + "ᶓ"  // U+1D93: LATIN SMALL LETTER OPEN E WITH RETROFLEX HOOK
+       + "ᶔ"  // U+1D94: LATIN SMALL LETTER REVERSED OPEN E WITH RETROFLEX HOOK
+       + "ḕ"  // U+1E15: LATIN SMALL LETTER E WITH MACRON AND GRAVE
+       + "ḗ"  // U+1E17: LATIN SMALL LETTER E WITH MACRON AND ACUTE
+       + "ḙ"  // U+1E19: LATIN SMALL LETTER E WITH CIRCUMFLEX BELOW
+       + "ḛ"  // U+1E1B: LATIN SMALL LETTER E WITH TILDE BELOW
+       + "ḝ"  // U+1E1D: LATIN SMALL LETTER E WITH CEDILLA AND BREVE
+       + "ẹ"  // U+1EB9: LATIN SMALL LETTER E WITH DOT BELOW
+       + "ẻ"  // U+1EBB: LATIN SMALL LETTER E WITH HOOK ABOVE
+       + "ẽ"  // U+1EBD: LATIN SMALL LETTER E WITH TILDE
+       + "ế"  // U+1EBF: LATIN SMALL LETTER E WITH CIRCUMFLEX AND ACUTE
+       + "ề"  // U+1EC1: LATIN SMALL LETTER E WITH CIRCUMFLEX AND GRAVE
+       + "ể"  // U+1EC3: LATIN SMALL LETTER E WITH CIRCUMFLEX AND HOOK ABOVE
+       + "ễ"  // U+1EC5: LATIN SMALL LETTER E WITH CIRCUMFLEX AND TILDE
+       + "ệ"  // U+1EC7: LATIN SMALL LETTER E WITH CIRCUMFLEX AND DOT BELOW
+       + "ₑ"  // U+2091: LATIN SUBSCRIPT SMALL LETTER E
+       + "ⓔ"  // U+24D4: CIRCLED LATIN SMALL LETTER E
+       + "ⱸ"  // U+2C78: LATIN SMALL LETTER E WITH NOTCH
+       + "e"  // U+FF45: FULLWIDTH LATIN SMALL LETTER E
+      ,"e", // Folded result
+
+       "⒠"  // U+24A0: PARENTHESIZED LATIN SMALL LETTER E
+      ,"(e)", // Folded result
+
+       "Ƒ"  // U+0191: LATIN CAPITAL LETTER F WITH HOOK
+       + "Ḟ"  // U+1E1E: LATIN CAPITAL LETTER F WITH DOT ABOVE
+       + "Ⓕ"  // U+24BB: CIRCLED LATIN CAPITAL LETTER F
+       + "ꜰ"  // U+A730: LATIN LETTER SMALL CAPITAL F
+       + "Ꝼ"  // U+A77B: LATIN CAPITAL LETTER INSULAR F
+       + "ꟻ"  // U+A7FB: LATIN EPIGRAPHIC LETTER REVERSED F
+       + "F"  // U+FF26: FULLWIDTH LATIN CAPITAL LETTER F
+      ,"F", // Folded result
+
+       "ƒ"  // U+0192: LATIN SMALL LETTER F WITH HOOK
+       + "ᵮ"  // U+1D6E: LATIN SMALL LETTER F WITH MIDDLE TILDE
+       + "ᶂ"  // U+1D82: LATIN SMALL LETTER F WITH PALATAL HOOK
+       + "ḟ"  // U+1E1F: LATIN SMALL LETTER F WITH DOT ABOVE
+       + "ẛ"  // U+1E9B: LATIN SMALL LETTER LONG S WITH DOT ABOVE
+       + "ⓕ"  // U+24D5: CIRCLED LATIN SMALL LETTER F
+       + "ꝼ"  // U+A77C: LATIN SMALL LETTER INSULAR F
+       + "f"  // U+FF46: FULLWIDTH LATIN SMALL LETTER F
+      ,"f", // Folded result
+
+       "⒡"  // U+24A1: PARENTHESIZED LATIN SMALL LETTER F
+      ,"(f)", // Folded result
+
+       "ff"  // U+FB00: LATIN SMALL LIGATURE FF
+      ,"ff", // Folded result
+
+       "ffi"  // U+FB03: LATIN SMALL LIGATURE FFI
+      ,"ffi", // Folded result
+
+       "ffl"  // U+FB04: LATIN SMALL LIGATURE FFL
+      ,"ffl", // Folded result
+
+       "fi"  // U+FB01: LATIN SMALL LIGATURE FI
+      ,"fi", // Folded result
+
+       "fl"  // U+FB02: LATIN SMALL LIGATURE FL
+      ,"fl", // Folded result
+
+       "Ĝ"  // U+011C: LATIN CAPITAL LETTER G WITH CIRCUMFLEX
+       + "Ğ"  // U+011E: LATIN CAPITAL LETTER G WITH BREVE
+       + "Ġ"  // U+0120: LATIN CAPITAL LETTER G WITH DOT ABOVE
+       + "Ģ"  // U+0122: LATIN CAPITAL LETTER G WITH CEDILLA
+       + "Ɠ"  // U+0193: LATIN CAPITAL LETTER G WITH HOOK
+       + "Ǥ"  // U+01E4: LATIN CAPITAL LETTER G WITH STROKE
+       + "ǥ"  // U+01E5: LATIN SMALL LETTER G WITH STROKE
+       + "Ǧ"  // U+01E6: LATIN CAPITAL LETTER G WITH CARON
+       + "ǧ"  // U+01E7: LATIN SMALL LETTER G WITH CARON
+       + "Ǵ"  // U+01F4: LATIN CAPITAL LETTER G WITH ACUTE
+       + "ɢ"  // U+0262: LATIN LETTER SMALL CAPITAL G
+       + "ʛ"  // U+029B: LATIN LETTER SMALL CAPITAL G WITH HOOK
+       + "Ḡ"  // U+1E20: LATIN CAPITAL LETTER G WITH MACRON
+       + "Ⓖ"  // U+24BC: CIRCLED LATIN CAPITAL LETTER G
+       + "Ᵹ"  // U+A77D: LATIN CAPITAL LETTER INSULAR G
+       + "Ꝿ"  // U+A77E: LATIN CAPITAL LETTER TURNED INSULAR G
+       + "G"  // U+FF27: FULLWIDTH LATIN CAPITAL LETTER G
+      ,"G", // Folded result
+
+       "ĝ"  // U+011D: LATIN SMALL LETTER G WITH CIRCUMFLEX
+       + "ğ"  // U+011F: LATIN SMALL LETTER G WITH BREVE
+       + "ġ"  // U+0121: LATIN SMALL LETTER G WITH DOT ABOVE
+       + "ģ"  // U+0123: LATIN SMALL LETTER G WITH CEDILLA
+       + "ǵ"  // U+01F5: LATIN SMALL LETTER G WITH ACUTE
+       + "ɠ"  // U+0260: LATIN SMALL LETTER G WITH HOOK
+       + "ɡ"  // U+0261: LATIN SMALL LETTER SCRIPT G
+       + "ᵷ"  // U+1D77: LATIN SMALL LETTER TURNED G
+       + "ᵹ"  // U+1D79: LATIN SMALL LETTER INSULAR G
+       + "ᶃ"  // U+1D83: LATIN SMALL LETTER G WITH PALATAL HOOK
+       + "ḡ"  // U+1E21: LATIN SMALL LETTER G WITH MACRON
+       + "ⓖ"  // U+24D6: CIRCLED LATIN SMALL LETTER G
+       + "ꝿ"  // U+A77F: LATIN SMALL LETTER TURNED INSULAR G
+       + "g"  // U+FF47: FULLWIDTH LATIN SMALL LETTER G
+      ,"g", // Folded result
+
+       "⒢"  // U+24A2: PARENTHESIZED LATIN SMALL LETTER G
+      ,"(g)", // Folded result
+
+       "Ĥ"  // U+0124: LATIN CAPITAL LETTER H WITH CIRCUMFLEX
+       + "Ħ"  // U+0126: LATIN CAPITAL LETTER H WITH STROKE
+       + "Ȟ"  // U+021E: LATIN CAPITAL LETTER H WITH CARON
+       + "ʜ"  // U+029C: LATIN LETTER SMALL CAPITAL H
+       + "Ḣ"  // U+1E22: LATIN CAPITAL LETTER H WITH DOT ABOVE
+       + "Ḥ"  // U+1E24: LATIN CAPITAL LETTER H WITH DOT BELOW
+       + "Ḧ"  // U+1E26: LATIN CAPITAL LETTER H WITH DIAERESIS
+       + "Ḩ"  // U+1E28: LATIN CAPITAL LETTER H WITH CEDILLA
+       + "Ḫ"  // U+1E2A: LATIN CAPITAL LETTER H WITH BREVE BELOW
+       + "Ⓗ"  // U+24BD: CIRCLED LATIN CAPITAL LETTER H
+       + "Ⱨ"  // U+2C67: LATIN CAPITAL LETTER H WITH DESCENDER
+       + "Ⱶ"  // U+2C75: LATIN CAPITAL LETTER HALF H
+       + "H"  // U+FF28: FULLWIDTH LATIN CAPITAL LETTER H
+      ,"H", // Folded result
+
+       "ĥ"  // U+0125: LATIN SMALL LETTER H WITH CIRCUMFLEX
+       + "ħ"  // U+0127: LATIN SMALL LETTER H WITH STROKE
+       + "ȟ"  // U+021F: LATIN SMALL LETTER H WITH CARON
+       + "ɥ"  // U+0265: LATIN SMALL LETTER TURNED H
+       + "ɦ"  // U+0266: LATIN SMALL LETTER H WITH HOOK
+       + "ʮ"  // U+02AE: LATIN SMALL LETTER TURNED H WITH FISHHOOK
+       + "ʯ"  // U+02AF: LATIN SMALL LETTER TURNED H WITH FISHHOOK AND TAIL
+       + "ḣ"  // U+1E23: LATIN SMALL LETTER H WITH DOT ABOVE
+       + "ḥ"  // U+1E25: LATIN SMALL LETTER H WITH DOT BELOW
+       + "ḧ"  // U+1E27: LATIN SMALL LETTER H WITH DIAERESIS
+       + "ḩ"  // U+1E29: LATIN SMALL LETTER H WITH CEDILLA
+       + "ḫ"  // U+1E2B: LATIN SMALL LETTER H WITH BREVE BELOW
+       + "ẖ"  // U+1E96: LATIN SMALL LETTER H WITH LINE BELOW
+       + "ⓗ"  // U+24D7: CIRCLED LATIN SMALL LETTER H
+       + "ⱨ"  // U+2C68: LATIN SMALL LETTER H WITH DESCENDER
+       + "ⱶ"  // U+2C76: LATIN SMALL LETTER HALF H
+       + "h"  // U+FF48: FULLWIDTH LATIN SMALL LETTER H
+      ,"h", // Folded result
+
+       "Ƕ"  // U+01F6: LATIN CAPITAL LETTER HWAIR
+      ,"HV", // Folded result
+
+       "⒣"  // U+24A3: PARENTHESIZED LATIN SMALL LETTER H
+      ,"(h)", // Folded result
+
+       "ƕ"  // U+0195: LATIN SMALL LETTER HV
+      ,"hv", // Folded result
+
+       "Ì"  // U+00CC: LATIN CAPITAL LETTER I WITH GRAVE
+       + "Í"  // U+00CD: LATIN CAPITAL LETTER I WITH ACUTE
+       + "Î"  // U+00CE: LATIN CAPITAL LETTER I WITH CIRCUMFLEX
+       + "Ï"  // U+00CF: LATIN CAPITAL LETTER I WITH DIAERESIS
+       + "Ĩ"  // U+0128: LATIN CAPITAL LETTER I WITH TILDE
+       + "Ī"  // U+012A: LATIN CAPITAL LETTER I WITH MACRON
+       + "Ĭ"  // U+012C: LATIN CAPITAL LETTER I WITH BREVE
+       + "Į"  // U+012E: LATIN CAPITAL LETTER I WITH OGONEK
+       + "İ"  // U+0130: LATIN CAPITAL LETTER I WITH DOT ABOVE
+       + "Ɩ"  // U+0196: LATIN CAPITAL LETTER IOTA
+       + "Ɨ"  // U+0197: LATIN CAPITAL LETTER I WITH STROKE
+       + "Ǐ"  // U+01CF: LATIN CAPITAL LETTER I WITH CARON
+       + "Ȉ"  // U+0208: LATIN CAPITAL LETTER I WITH DOUBLE GRAVE
+       + "Ȋ"  // U+020A: LATIN CAPITAL LETTER I WITH INVERTED BREVE
+       + "ɪ"  // U+026A: LATIN LETTER SMALL CAPITAL I
+       + "ᵻ"  // U+1D7B: LATIN SMALL CAPITAL LETTER I WITH STROKE
+       + "Ḭ"  // U+1E2C: LATIN CAPITAL LETTER I WITH TILDE BELOW
+       + "Ḯ"  // U+1E2E: LATIN CAPITAL LETTER I WITH DIAERESIS AND ACUTE
+       + "Ỉ"  // U+1EC8: LATIN CAPITAL LETTER I WITH HOOK ABOVE
+       + "Ị"  // U+1ECA: LATIN CAPITAL LETTER I WITH DOT BELOW
+       + "Ⓘ"  // U+24BE: CIRCLED LATIN CAPITAL LETTER I
+       + "ꟾ"  // U+A7FE: LATIN EPIGRAPHIC LETTER I LONGA
+       + "I"  // U+FF29: FULLWIDTH LATIN CAPITAL LETTER I
+      ,"I", // Folded result
+
+       "ì"  // U+00EC: LATIN SMALL LETTER I WITH GRAVE
+       + "í"  // U+00ED: LATIN SMALL LETTER I WITH ACUTE
+       + "î"  // U+00EE: LATIN SMALL LETTER I WITH CIRCUMFLEX
+       + "ï"  // U+00EF: LATIN SMALL LETTER I WITH DIAERESIS
+       + "ĩ"  // U+0129: LATIN SMALL LETTER I WITH TILDE
+       + "ī"  // U+012B: LATIN SMALL LETTER I WITH MACRON
+       + "ĭ"  // U+012D: LATIN SMALL LETTER I WITH BREVE
+       + "į"  // U+012F: LATIN SMALL LETTER I WITH OGONEK
+       + "ı"  // U+0131: LATIN SMALL LETTER DOTLESS I
+       + "ǐ"  // U+01D0: LATIN SMALL LETTER I WITH CARON
+       + "ȉ"  // U+0209: LATIN SMALL LETTER I WITH DOUBLE GRAVE
+       + "ȋ"  // U+020B: LATIN SMALL LETTER I WITH INVERTED BREVE
+       + "ɨ"  // U+0268: LATIN SMALL LETTER I WITH STROKE
+       + "ᴉ"  // U+1D09: LATIN SMALL LETTER TURNED I
+       + "ᵢ"  // U+1D62: LATIN SUBSCRIPT SMALL LETTER I
+       + "ᵼ"  // U+1D7C: LATIN SMALL LETTER IOTA WITH STROKE
+       + "ᶖ"  // U+1D96: LATIN SMALL LETTER I WITH RETROFLEX HOOK
+       + "ḭ"  // U+1E2D: LATIN SMALL LETTER I WITH TILDE BELOW
+       + "ḯ"  // U+1E2F: LATIN SMALL LETTER I WITH DIAERESIS AND ACUTE
+       + "ỉ"  // U+1EC9: LATIN SMALL LETTER I WITH HOOK ABOVE
+       + "ị"  // U+1ECB: LATIN SMALL LETTER I WITH DOT BELOW
+       + "ⁱ"  // U+2071: SUPERSCRIPT LATIN SMALL LETTER I
+       + "ⓘ"  // U+24D8: CIRCLED LATIN SMALL LETTER I
+       + "i"  // U+FF49: FULLWIDTH LATIN SMALL LETTER I
+      ,"i", // Folded result
+
+       "IJ"  // U+0132: LATIN CAPITAL LIGATURE IJ
+      ,"IJ", // Folded result
+
+       "⒤"  // U+24A4: PARENTHESIZED LATIN SMALL LETTER I
+      ,"(i)", // Folded result
+
+       "ij"  // U+0133: LATIN SMALL LIGATURE IJ
+      ,"ij", // Folded result
+
+       "Ĵ"  // U+0134: LATIN CAPITAL LETTER J WITH CIRCUMFLEX
+       + "Ɉ"  // U+0248: LATIN CAPITAL LETTER J WITH STROKE
+       + "ᴊ"  // U+1D0A: LATIN LETTER SMALL CAPITAL J
+       + "Ⓙ"  // U+24BF: CIRCLED LATIN CAPITAL LETTER J
+       + "J"  // U+FF2A: FULLWIDTH LATIN CAPITAL LETTER J
+      ,"J", // Folded result
+
+       "ĵ"  // U+0135: LATIN SMALL LETTER J WITH CIRCUMFLEX
+       + "ǰ"  // U+01F0: LATIN SMALL LETTER J WITH CARON
+       + "ȷ"  // U+0237: LATIN SMALL LETTER DOTLESS J
+       + "ɉ"  // U+0249: LATIN SMALL LETTER J WITH STROKE
+       + "ɟ"  // U+025F: LATIN SMALL LETTER DOTLESS J WITH STROKE
+       + "ʄ"  // U+0284: LATIN SMALL LETTER DOTLESS J WITH STROKE AND HOOK
+       + "ʝ"  // U+029D: LATIN SMALL LETTER J WITH CROSSED-TAIL
+       + "ⓙ"  // U+24D9: CIRCLED LATIN SMALL LETTER J
+       + "ⱼ"  // U+2C7C: LATIN SUBSCRIPT SMALL LETTER J
+       + "j"  // U+FF4A: FULLWIDTH LATIN SMALL LETTER J
+      ,"j", // Folded result
+
+       "⒥"  // U+24A5: PARENTHESIZED LATIN SMALL LETTER J
+      ,"(j)", // Folded result
+
+       "Ķ"  // U+0136: LATIN CAPITAL LETTER K WITH CEDILLA
+       + "Ƙ"  // U+0198: LATIN CAPITAL LETTER K WITH HOOK
+       + "Ǩ"  // U+01E8: LATIN CAPITAL LETTER K WITH CARON
+       + "ᴋ"  // U+1D0B: LATIN LETTER SMALL CAPITAL K
+       + "Ḱ"  // U+1E30: LATIN CAPITAL LETTER K WITH ACUTE
+       + "Ḳ"  // U+1E32: LATIN CAPITAL LETTER K WITH DOT BELOW
+       + "Ḵ"  // U+1E34: LATIN CAPITAL LETTER K WITH LINE BELOW
+       + "Ⓚ"  // U+24C0: CIRCLED LATIN CAPITAL LETTER K
+       + "Ⱪ"  // U+2C69: LATIN CAPITAL LETTER K WITH DESCENDER
+       + "Ꝁ"  // U+A740: LATIN CAPITAL LETTER K WITH STROKE
+       + "Ꝃ"  // U+A742: LATIN CAPITAL LETTER K WITH DIAGONAL STROKE
+       + "Ꝅ"  // U+A744: LATIN CAPITAL LETTER K WITH STROKE AND DIAGONAL STROKE
+       + "K"  // U+FF2B: FULLWIDTH LATIN CAPITAL LETTER K
+      ,"K", // Folded result
+
+       "ķ"  // U+0137: LATIN SMALL LETTER K WITH CEDILLA
+       + "ƙ"  // U+0199: LATIN SMALL LETTER K WITH HOOK
+       + "ǩ"  // U+01E9: LATIN SMALL LETTER K WITH CARON
+       + "ʞ"  // U+029E: LATIN SMALL LETTER TURNED K
+       + "ᶄ"  // U+1D84: LATIN SMALL LETTER K WITH PALATAL HOOK
+       + "ḱ"  // U+1E31: LATIN SMALL LETTER K WITH ACUTE
+       + "ḳ"  // U+1E33: LATIN SMALL LETTER K WITH DOT BELOW
+       + "ḵ"  // U+1E35: LATIN SMALL LETTER K WITH LINE BELOW
+       + "ⓚ"  // U+24DA: CIRCLED LATIN SMALL LETTER K
+       + "ⱪ"  // U+2C6A: LATIN SMALL LETTER K WITH DESCENDER
+       + "ꝁ"  // U+A741: LATIN SMALL LETTER K WITH STROKE
+       + "ꝃ"  // U+A743: LATIN SMALL LETTER K WITH DIAGONAL STROKE
+       + "ꝅ"  // U+A745: LATIN SMALL LETTER K WITH STROKE AND DIAGONAL STROKE
+       + "k"  // U+FF4B: FULLWIDTH LATIN SMALL LETTER K
+      ,"k", // Folded result
+
+       "⒦"  // U+24A6: PARENTHESIZED LATIN SMALL LETTER K
+      ,"(k)", // Folded result
+
+       "Ĺ"  // U+0139: LATIN CAPITAL LETTER L WITH ACUTE
+       + "Ļ"  // U+013B: LATIN CAPITAL LETTER L WITH CEDILLA
+       + "Ľ"  // U+013D: LATIN CAPITAL LETTER L WITH CARON
+       + "Ŀ"  // U+013F: LATIN CAPITAL LETTER L WITH MIDDLE DOT
+       + "Ł"  // U+0141: LATIN CAPITAL LETTER L WITH STROKE
+       + "Ƚ"  // U+023D: LATIN CAPITAL LETTER L WITH BAR
+       + "ʟ"  // U+029F: LATIN LETTER SMALL CAPITAL L
+       + "ᴌ"  // U+1D0C: LATIN LETTER SMALL CAPITAL L WITH STROKE
+       + "Ḷ"  // U+1E36: LATIN CAPITAL LETTER L WITH DOT BELOW
+       + "Ḹ"  // U+1E38: LATIN CAPITAL LETTER L WITH DOT BELOW AND MACRON
+       + "Ḻ"  // U+1E3A: LATIN CAPITAL LETTER L WITH LINE BELOW
+       + "Ḽ"  // U+1E3C: LATIN CAPITAL LETTER L WITH CIRCUMFLEX BELOW
+       + "Ⓛ"  // U+24C1: CIRCLED LATIN CAPITAL LETTER L
+       + "Ⱡ"  // U+2C60: LATIN CAPITAL LETTER L WITH DOUBLE BAR
+       + "Ɫ"  // U+2C62: LATIN CAPITAL LETTER L WITH MIDDLE TILDE
+       + "Ꝇ"  // U+A746: LATIN CAPITAL LETTER BROKEN L
+       + "Ꝉ"  // U+A748: LATIN CAPITAL LETTER L WITH HIGH STROKE
+       + "Ꞁ"  // U+A780: LATIN CAPITAL LETTER TURNED L
+       + "L"  // U+FF2C: FULLWIDTH LATIN CAPITAL LETTER L
+      ,"L", // Folded result
+
+       "ĺ"  // U+013A: LATIN SMALL LETTER L WITH ACUTE
+       + "ļ"  // U+013C: LATIN SMALL LETTER L WITH CEDILLA
+       + "ľ"  // U+013E: LATIN SMALL LETTER L WITH CARON
+       + "ŀ"  // U+0140: LATIN SMALL LETTER L WITH MIDDLE DOT
+       + "ł"  // U+0142: LATIN SMALL LETTER L WITH STROKE
+       + "ƚ"  // U+019A: LATIN SMALL LETTER L WITH BAR
+       + "ȴ"  // U+0234: LATIN SMALL LETTER L WITH CURL
+       + "ɫ"  // U+026B: LATIN SMALL LETTER L WITH MIDDLE TILDE
+       + "ɬ"  // U+026C: LATIN SMALL LETTER L WITH BELT
+       + "ɭ"  // U+026D: LATIN SMALL LETTER L WITH RETROFLEX HOOK
+       + "ᶅ"  // U+1D85: LATIN SMALL LETTER L WITH PALATAL HOOK
+       + "ḷ"  // U+1E37: LATIN SMALL LETTER L WITH DOT BELOW
+       + "ḹ"  // U+1E39: LATIN SMALL LETTER L WITH DOT BELOW AND MACRON
+       + "ḻ"  // U+1E3B: LATIN SMALL LETTER L WITH LINE BELOW
+       + "ḽ"  // U+1E3D: LATIN SMALL LETTER L WITH CIRCUMFLEX BELOW
+       + "ⓛ"  // U+24DB: CIRCLED LATIN SMALL LETTER L
+       + "ⱡ"  // U+2C61: LATIN SMALL LETTER L WITH DOUBLE BAR
+       + "ꝇ"  // U+A747: LATIN SMALL LETTER BROKEN L
+       + "ꝉ"  // U+A749: LATIN SMALL LETTER L WITH HIGH STROKE
+       + "ꞁ"  // U+A781: LATIN SMALL LETTER TURNED L
+       + "l"  // U+FF4C: FULLWIDTH LATIN SMALL LETTER L
+      ,"l", // Folded result
+
+       "LJ"  // U+01C7: LATIN CAPITAL LETTER LJ
+      ,"LJ", // Folded result
+
+       "Ỻ"  // U+1EFA: LATIN CAPITAL LETTER MIDDLE-WELSH LL
+      ,"LL", // Folded result
+
+       "Lj"  // U+01C8: LATIN CAPITAL LETTER L WITH SMALL LETTER J
+      ,"Lj", // Folded result
+
+       "⒧"  // U+24A7: PARENTHESIZED LATIN SMALL LETTER L
+      ,"(l)", // Folded result
+
+       "lj"  // U+01C9: LATIN SMALL LETTER LJ
+      ,"lj", // Folded result
+
+       "ỻ"  // U+1EFB: LATIN SMALL LETTER MIDDLE-WELSH LL
+      ,"ll", // Folded result
+
+       "ʪ"  // U+02AA: LATIN SMALL LETTER LS DIGRAPH
+      ,"ls", // Folded result
+
+       "ʫ"  // U+02AB: LATIN SMALL LETTER LZ DIGRAPH
+      ,"lz", // Folded result
+
+       "Ɯ"  // U+019C: LATIN CAPITAL LETTER TURNED M
+       + "ᴍ"  // U+1D0D: LATIN LETTER SMALL CAPITAL M
+       + "Ḿ"  // U+1E3E: LATIN CAPITAL LETTER M WITH ACUTE
+       + "Ṁ"  // U+1E40: LATIN CAPITAL LETTER M WITH DOT ABOVE
+       + "Ṃ"  // U+1E42: LATIN CAPITAL LETTER M WITH DOT BELOW
+       + "Ⓜ"  // U+24C2: CIRCLED LATIN CAPITAL LETTER M
+       + "Ɱ"  // U+2C6E: LATIN CAPITAL LETTER M WITH HOOK
+       + "ꟽ"  // U+A7FD: LATIN EPIGRAPHIC LETTER INVERTED M
+       + "ꟿ"  // U+A7FF: LATIN EPIGRAPHIC LETTER ARCHAIC M
+       + "M"  // U+FF2D: FULLWIDTH LATIN CAPITAL LETTER M
+      ,"M", // Folded result
+
+       "ɯ"  // U+026F: LATIN SMALL LETTER TURNED M
+       + "ɰ"  // U+0270: LATIN SMALL LETTER TURNED M WITH LONG LEG
+       + "ɱ"  // U+0271: LATIN SMALL LETTER M WITH HOOK
+       + "ᵯ"  // U+1D6F: LATIN SMALL LETTER M WITH MIDDLE TILDE
+       + "ᶆ"  // U+1D86: LATIN SMALL LETTER M WITH PALATAL HOOK
+       + "ḿ"  // U+1E3F: LATIN SMALL LETTER M WITH ACUTE
+       + "ṁ"  // U+1E41: LATIN SMALL LETTER M WITH DOT ABOVE
+       + "ṃ"  // U+1E43: LATIN SMALL LETTER M WITH DOT BELOW
+       + "ⓜ"  // U+24DC: CIRCLED LATIN SMALL LETTER M
+       + "m"  // U+FF4D: FULLWIDTH LATIN SMALL LETTER M
+      ,"m", // Folded result
+
+       "⒨"  // U+24A8: PARENTHESIZED LATIN SMALL LETTER M
+      ,"(m)", // Folded result
+
+       "Ñ"  // U+00D1: LATIN CAPITAL LETTER N WITH TILDE
+       + "Ń"  // U+0143: LATIN CAPITAL LETTER N WITH ACUTE
+       + "Ņ"  // U+0145: LATIN CAPITAL LETTER N WITH CEDILLA
+       + "Ň"  // U+0147: LATIN CAPITAL LETTER N WITH CARON
+       + "Ŋ"  // U+014A: LATIN CAPITAL LETTER ENG
+       + "Ɲ"  // U+019D: LATIN CAPITAL LETTER N WITH LEFT HOOK
+       + "Ǹ"  // U+01F8: LATIN CAPITAL LETTER N WITH GRAVE
+       + "Ƞ"  // U+0220: LATIN CAPITAL LETTER N WITH LONG RIGHT LEG
+       + "ɴ"  // U+0274: LATIN LETTER SMALL CAPITAL N
+       + "ᴎ"  // U+1D0E: LATIN LETTER SMALL CAPITAL REVERSED N
+       + "Ṅ"  // U+1E44: LATIN CAPITAL LETTER N WITH DOT ABOVE
+       + "Ṇ"  // U+1E46: LATIN CAPITAL LETTER N WITH DOT BELOW
+       + "Ṉ"  // U+1E48: LATIN CAPITAL LETTER N WITH LINE BELOW
+       + "Ṋ"  // U+1E4A: LATIN CAPITAL LETTER N WITH CIRCUMFLEX BELOW
+       + "Ⓝ"  // U+24C3: CIRCLED LATIN CAPITAL LETTER N
+       + "N"  // U+FF2E: FULLWIDTH LATIN CAPITAL LETTER N
+      ,"N", // Folded result
+
+       "ñ"  // U+00F1: LATIN SMALL LETTER N WITH TILDE
+       + "ń"  // U+0144: LATIN SMALL LETTER N WITH ACUTE
+       + "ņ"  // U+0146: LATIN SMALL LETTER N WITH CEDILLA
+       + "ň"  // U+0148: LATIN SMALL LETTER N WITH CARON
+       + "ʼn"  // U+0149: LATIN SMALL LETTER N PRECEDED BY APOSTROPHE
+       + "ŋ"  // U+014B: LATIN SMALL LETTER ENG
+       + "ƞ"  // U+019E: LATIN SMALL LETTER N WITH LONG RIGHT LEG
+       + "ǹ"  // U+01F9: LATIN SMALL LETTER N WITH GRAVE
+       + "ȵ"  // U+0235: LATIN SMALL LETTER N WITH CURL
+       + "ɲ"  // U+0272: LATIN SMALL LETTER N WITH LEFT HOOK
+       + "ɳ"  // U+0273: LATIN SMALL LETTER N WITH RETROFLEX HOOK
+       + "ᵰ"  // U+1D70: LATIN SMALL LETTER N WITH MIDDLE TILDE
+       + "ᶇ"  // U+1D87: LATIN SMALL LETTER N WITH PALATAL HOOK
+       + "ṅ"  // U+1E45: LATIN SMALL LETTER N WITH DOT ABOVE
+       + "ṇ"  // U+1E47: LATIN SMALL LETTER N WITH DOT BELOW
+       + "ṉ"  // U+1E49: LATIN SMALL LETTER N WITH LINE BELOW
+       + "ṋ"  // U+1E4B: LATIN SMALL LETTER N WITH CIRCUMFLEX BELOW
+       + "ⁿ"  // U+207F: SUPERSCRIPT LATIN SMALL LETTER N
+       + "ⓝ"  // U+24DD: CIRCLED LATIN SMALL LETTER N
+       + "n"  // U+FF4E: FULLWIDTH LATIN SMALL LETTER N
+      ,"n", // Folded result
+
+       "NJ"  // U+01CA: LATIN CAPITAL LETTER NJ
+      ,"NJ", // Folded result
+
+       "Nj"  // U+01CB: LATIN CAPITAL LETTER N WITH SMALL LETTER J
+      ,"Nj", // Folded result
+
+       "⒩"  // U+24A9: PARENTHESIZED LATIN SMALL LETTER N
+      ,"(n)", // Folded result
+
+       "nj"  // U+01CC: LATIN SMALL LETTER NJ
+      ,"nj", // Folded result
+
+       "Ò"  // U+00D2: LATIN CAPITAL LETTER O WITH GRAVE
+       + "Ó"  // U+00D3: LATIN CAPITAL LETTER O WITH ACUTE
+       + "Ô"  // U+00D4: LATIN CAPITAL LETTER O WITH CIRCUMFLEX
+       + "Õ"  // U+00D5: LATIN CAPITAL LETTER O WITH TILDE
+       + "Ö"  // U+00D6: LATIN CAPITAL LETTER O WITH DIAERESIS
+       + "Ø"  // U+00D8: LATIN CAPITAL LETTER O WITH STROKE
+       + "Ō"  // U+014C: LATIN CAPITAL LETTER O WITH MACRON
+       + "Ŏ"  // U+014E: LATIN CAPITAL LETTER O WITH BREVE
+       + "Ő"  // U+0150: LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
+       + "Ɔ"  // U+0186: LATIN CAPITAL LETTER OPEN O
+       + "Ɵ"  // U+019F: LATIN CAPITAL LETTER O WITH MIDDLE TILDE
+       + "Ơ"  // U+01A0: LATIN CAPITAL LETTER O WITH HORN
+       + "Ǒ"  // U+01D1: LATIN CAPITAL LETTER O WITH CARON
+       + "Ǫ"  // U+01EA: LATIN CAPITAL LETTER O WITH OGONEK
+       + "Ǭ"  // U+01EC: LATIN CAPITAL LETTER O WITH OGONEK AND MACRON
+       + "Ǿ"  // U+01FE: LATIN CAPITAL LETTER O WITH STROKE AND ACUTE
+       + "Ȍ"  // U+020C: LATIN CAPITAL LETTER O WITH DOUBLE GRAVE
+       + "Ȏ"  // U+020E: LATIN CAPITAL LETTER O WITH INVERTED BREVE
+       + "Ȫ"  // U+022A: LATIN CAPITAL LETTER O WITH DIAERESIS AND MACRON
+       + "Ȭ"  // U+022C: LATIN CAPITAL LETTER O WITH TILDE AND MACRON
+       + "Ȯ"  // U+022E: LATIN CAPITAL LETTER O WITH DOT ABOVE
+       + "Ȱ"  // U+0230: LATIN CAPITAL LETTER O WITH DOT ABOVE AND MACRON
+       + "ᴏ"  // U+1D0F: LATIN LETTER SMALL CAPITAL O
+       + "ᴐ"  // U+1D10: LATIN LETTER SMALL CAPITAL OPEN O
+       + "Ṍ"  // U+1E4C: LATIN CAPITAL LETTER O WITH TILDE AND ACUTE
+       + "Ṏ"  // U+1E4E: LATIN CAPITAL LETTER O WITH TILDE AND DIAERESIS
+       + "Ṑ"  // U+1E50: LATIN CAPITAL LETTER O WITH MACRON AND GRAVE
+       + "Ṓ"  // U+1E52: LATIN CAPITAL LETTER O WITH MACRON AND ACUTE
+       + "Ọ"  // U+1ECC: LATIN CAPITAL LETTER O WITH DOT BELOW
+       + "Ỏ"  // U+1ECE: LATIN CAPITAL LETTER O WITH HOOK ABOVE
+       + "Ố"  // U+1ED0: LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND ACUTE
+       + "Ồ"  // U+1ED2: LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND GRAVE
+       + "Ổ"  // U+1ED4: LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND HOOK ABOVE
+       + "Ỗ"  // U+1ED6: LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND TILDE
+       + "Ộ"  // U+1ED8: LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND DOT BELOW
+       + "Ớ"  // U+1EDA: LATIN CAPITAL LETTER O WITH HORN AND ACUTE
+       + "Ờ"  // U+1EDC: LATIN CAPITAL LETTER O WITH HORN AND GRAVE
+       + "Ở"  // U+1EDE: LATIN CAPITAL LETTER O WITH HORN AND HOOK ABOVE
+       + "Ỡ"  // U+1EE0: LATIN CAPITAL LETTER O WITH HORN AND TILDE
+       + "Ợ"  // U+1EE2: LATIN CAPITAL LETTER O WITH HORN AND DOT BELOW
+       + "Ⓞ"  // U+24C4: CIRCLED LATIN CAPITAL LETTER O
+       + "Ꝋ"  // U+A74A: LATIN CAPITAL LETTER O WITH LONG STROKE OVERLAY
+       + "Ꝍ"  // U+A74C: LATIN CAPITAL LETTER O WITH LOOP
+       + "O"  // U+FF2F: FULLWIDTH LATIN CAPITAL LETTER O
+      ,"O", // Folded result
+
+       "ò"  // U+00F2: LATIN SMALL LETTER O WITH GRAVE
+       + "ó"  // U+00F3: LATIN SMALL LETTER O WITH ACUTE
+       + "ô"  // U+00F4: LATIN SMALL LETTER O WITH CIRCUMFLEX
+       + "õ"  // U+00F5: LATIN SMALL LETTER O WITH TILDE
+       + "ö"  // U+00F6: LATIN SMALL LETTER O WITH DIAERESIS
+       + "ø"  // U+00F8: LATIN SMALL LETTER O WITH STROKE
+       + "ō"  // U+014D: LATIN SMALL LETTER O WITH MACRON
+       + "ŏ"  // U+014F: LATIN SMALL LETTER O WITH BREVE
+       + "ő"  // U+0151: LATIN SMALL LETTER O WITH DOUBLE ACUTE
+       + "ơ"  // U+01A1: LATIN SMALL LETTER O WITH HORN
+       + "ǒ"  // U+01D2: LATIN SMALL LETTER O WITH CARON
+       + "ǫ"  // U+01EB: LATIN SMALL LETTER O WITH OGONEK
+       + "ǭ"  // U+01ED: LATIN SMALL LETTER O WITH OGONEK AND MACRON
+       + "ǿ"  // U+01FF: LATIN SMALL LETTER O WITH STROKE AND ACUTE
+       + "ȍ"  // U+020D: LATIN SMALL LETTER O WITH DOUBLE GRAVE
+       + "ȏ"  // U+020F: LATIN SMALL LETTER O WITH INVERTED BREVE
+       + "ȫ"  // U+022B: LATIN SMALL LETTER O WITH DIAERESIS AND MACRON
+       + "ȭ"  // U+022D: LATIN SMALL LETTER O WITH TILDE AND MACRON
+       + "ȯ"  // U+022F: LATIN SMALL LETTER O WITH DOT ABOVE
+       + "ȱ"  // U+0231: LATIN SMALL LETTER O WITH DOT ABOVE AND MACRON
+       + "ɔ"  // U+0254: LATIN SMALL LETTER OPEN O
+       + "ɵ"  // U+0275: LATIN SMALL LETTER BARRED O
+       + "ᴖ"  // U+1D16: LATIN SMALL LETTER TOP HALF O
+       + "ᴗ"  // U+1D17: LATIN SMALL LETTER BOTTOM HALF O
+       + "ᶗ"  // U+1D97: LATIN SMALL LETTER OPEN O WITH RETROFLEX HOOK
+       + "ṍ"  // U+1E4D: LATIN SMALL LETTER O WITH TILDE AND ACUTE
+       + "ṏ"  // U+1E4F: LATIN SMALL LETTER O WITH TILDE AND DIAERESIS
+       + "ṑ"  // U+1E51: LATIN SMALL LETTER O WITH MACRON AND GRAVE
+       + "ṓ"  // U+1E53: LATIN SMALL LETTER O WITH MACRON AND ACUTE
+       + "ọ"  // U+1ECD: LATIN SMALL LETTER O WITH DOT BELOW
+       + "ỏ"  // U+1ECF: LATIN SMALL LETTER O WITH HOOK ABOVE
+       + "ố"  // U+1ED1: LATIN SMALL LETTER O WITH CIRCUMFLEX AND ACUTE
+       + "ồ"  // U+1ED3: LATIN SMALL LETTER O WITH CIRCUMFLEX AND GRAVE
+       + "ổ"  // U+1ED5: LATIN SMALL LETTER O WITH CIRCUMFLEX AND HOOK ABOVE
+       + "ỗ"  // U+1ED7: LATIN SMALL LETTER O WITH CIRCUMFLEX AND TILDE
+       + "ộ"  // U+1ED9: LATIN SMALL LETTER O WITH CIRCUMFLEX AND DOT BELOW
+       + "ớ"  // U+1EDB: LATIN SMALL LETTER O WITH HORN AND ACUTE
+       + "ờ"  // U+1EDD: LATIN SMALL LETTER O WITH HORN AND GRAVE
+       + "ở"  // U+1EDF: LATIN SMALL LETTER O WITH HORN AND HOOK ABOVE
+       + "ỡ"  // U+1EE1: LATIN SMALL LETTER O WITH HORN AND TILDE
+       + "ợ"  // U+1EE3: LATIN SMALL LETTER O WITH HORN AND DOT BELOW
+       + "ₒ"  // U+2092: LATIN SUBSCRIPT SMALL LETTER O
+       + "ⓞ"  // U+24DE: CIRCLED LATIN SMALL LETTER O
+       + "ⱺ"  // U+2C7A: LATIN SMALL LETTER O WITH LOW RING INSIDE
+       + "ꝋ"  // U+A74B: LATIN SMALL LETTER O WITH LONG STROKE OVERLAY
+       + "ꝍ"  // U+A74D: LATIN SMALL LETTER O WITH LOOP
+       + "o"  // U+FF4F: FULLWIDTH LATIN SMALL LETTER O
+      ,"o", // Folded result
+
+       "Œ"  // U+0152: LATIN CAPITAL LIGATURE OE
+       + "ɶ"  // U+0276: LATIN LETTER SMALL CAPITAL OE
+      ,"OE", // Folded result
+
+       "Ꝏ"  // U+A74E: LATIN CAPITAL LETTER OO
+      ,"OO", // Folded result
+
+       "Ȣ"  // U+0222: LATIN CAPITAL LETTER OU
+       + "ᴕ"  // U+1D15: LATIN LETTER SMALL CAPITAL OU
+      ,"OU", // Folded result
+
+       "⒪"  // U+24AA: PARENTHESIZED LATIN SMALL LETTER O
+      ,"(o)", // Folded result
+
+       "œ"  // U+0153: LATIN SMALL LIGATURE OE
+       + "ᴔ"  // U+1D14: LATIN SMALL LETTER TURNED OE
+      ,"oe", // Folded result
+
+       "ꝏ"  // U+A74F: LATIN SMALL LETTER OO
+      ,"oo", // Folded result
+
+       "ȣ"  // U+0223: LATIN SMALL LETTER OU
+      ,"ou", // Folded result
+
+       "Ƥ"  // U+01A4: LATIN CAPITAL LETTER P WITH HOOK
+       + "ᴘ"  // U+1D18: LATIN LETTER SMALL CAPITAL P
+       + "Ṕ"  // U+1E54: LATIN CAPITAL LETTER P WITH ACUTE
+       + "Ṗ"  // U+1E56: LATIN CAPITAL LETTER P WITH DOT ABOVE
+       + "Ⓟ"  // U+24C5: CIRCLED LATIN CAPITAL LETTER P
+       + "Ᵽ"  // U+2C63: LATIN CAPITAL LETTER P WITH STROKE
+       + "Ꝑ"  // U+A750: LATIN CAPITAL LETTER P WITH STROKE THROUGH DESCENDER
+       + "Ꝓ"  // U+A752: LATIN CAPITAL LETTER P WITH FLOURISH
+       + "Ꝕ"  // U+A754: LATIN CAPITAL LETTER P WITH SQUIRREL TAIL
+       + "P"  // U+FF30: FULLWIDTH LATIN CAPITAL LETTER P
+      ,"P", // Folded result
+
+       "ƥ"  // U+01A5: LATIN SMALL LETTER P WITH HOOK
+       + "ᵱ"  // U+1D71: LATIN SMALL LETTER P WITH MIDDLE TILDE
+       + "ᵽ"  // U+1D7D: LATIN SMALL LETTER P WITH STROKE
+       + "ᶈ"  // U+1D88: LATIN SMALL LETTER P WITH PALATAL HOOK
+       + "ṕ"  // U+1E55: LATIN SMALL LETTER P WITH ACUTE
+       + "ṗ"  // U+1E57: LATIN SMALL LETTER P WITH DOT ABOVE
+       + "ⓟ"  // U+24DF: CIRCLED LATIN SMALL LETTER P
+       + "ꝑ"  // U+A751: LATIN SMALL LETTER P WITH STROKE THROUGH DESCENDER
+       + "ꝓ"  // U+A753: LATIN SMALL LETTER P WITH FLOURISH
+       + "ꝕ"  // U+A755: LATIN SMALL LETTER P WITH SQUIRREL TAIL
+       + "ꟼ"  // U+A7FC: LATIN EPIGRAPHIC LETTER REVERSED P
+       + "p"  // U+FF50: FULLWIDTH LATIN SMALL LETTER P
+      ,"p", // Folded result
+
+       "⒫"  // U+24AB: PARENTHESIZED LATIN SMALL LETTER P
+      ,"(p)", // Folded result
+
+       "Ɋ"  // U+024A: LATIN CAPITAL LETTER SMALL Q WITH HOOK TAIL
+       + "Ⓠ"  // U+24C6: CIRCLED LATIN CAPITAL LETTER Q
+       + "Ꝗ"  // U+A756: LATIN CAPITAL LETTER Q WITH STROKE THROUGH DESCENDER
+       + "Ꝙ"  // U+A758: LATIN CAPITAL LETTER Q WITH DIAGONAL STROKE
+       + "Q"  // U+FF31: FULLWIDTH LATIN CAPITAL LETTER Q
+      ,"Q", // Folded result
+
+       "ĸ"  // U+0138: LATIN SMALL LETTER KRA
+       + "ɋ"  // U+024B: LATIN SMALL LETTER Q WITH HOOK TAIL
+       + "ʠ"  // U+02A0: LATIN SMALL LETTER Q WITH HOOK
+       + "ⓠ"  // U+24E0: CIRCLED LATIN SMALL LETTER Q
+       + "ꝗ"  // U+A757: LATIN SMALL LETTER Q WITH STROKE THROUGH DESCENDER
+       + "ꝙ"  // U+A759: LATIN SMALL LETTER Q WITH DIAGONAL STROKE
+       + "q"  // U+FF51: FULLWIDTH LATIN SMALL LETTER Q
+      ,"q", // Folded result
+
+       "⒬"  // U+24AC: PARENTHESIZED LATIN SMALL LETTER Q
+      ,"(q)", // Folded result
+
+       "ȹ"  // U+0239: LATIN SMALL LETTER QP DIGRAPH
+      ,"qp", // Folded result
+
+       "Ŕ"  // U+0154: LATIN CAPITAL LETTER R WITH ACUTE
+       + "Ŗ"  // U+0156: LATIN CAPITAL LETTER R WITH CEDILLA
+       + "Ř"  // U+0158: LATIN CAPITAL LETTER R WITH CARON
+       + "Ȑ"  // U+0210: LATIN CAPITAL LETTER R WITH DOUBLE GRAVE
+       + "Ȓ"  // U+0212: LATIN CAPITAL LETTER R WITH INVERTED BREVE
+       + "Ɍ"  // U+024C: LATIN CAPITAL LETTER R WITH STROKE
+       + "ʀ"  // U+0280: LATIN LETTER SMALL CAPITAL R
+       + "ʁ"  // U+0281: LATIN LETTER SMALL CAPITAL INVERTED R
+       + "ᴙ"  // U+1D19: LATIN LETTER SMALL CAPITAL REVERSED R
+       + "ᴚ"  // U+1D1A: LATIN LETTER SMALL CAPITAL TURNED R
+       + "Ṙ"  // U+1E58: LATIN CAPITAL LETTER R WITH DOT ABOVE
+       + "Ṛ"  // U+1E5A: LATIN CAPITAL LETTER R WITH DOT BELOW
+       + "Ṝ"  // U+1E5C: LATIN CAPITAL LETTER R WITH DOT BELOW AND MACRON
+       + "Ṟ"  // U+1E5E: LATIN CAPITAL LETTER R WITH LINE BELOW
+       + "Ⓡ"  // U+24C7: CIRCLED LATIN CAPITAL LETTER R
+       + "Ɽ"  // U+2C64: LATIN CAPITAL LETTER R WITH TAIL
+       + "Ꝛ"  // U+A75A: LATIN CAPITAL LETTER R ROTUNDA
+       + "Ꞃ"  // U+A782: LATIN CAPITAL LETTER INSULAR R
+       + "R"  // U+FF32: FULLWIDTH LATIN CAPITAL LETTER R
+      ,"R", // Folded result
+
+       "ŕ"  // U+0155: LATIN SMALL LETTER R WITH ACUTE
+       + "ŗ"  // U+0157: LATIN SMALL LETTER R WITH CEDILLA
+       + "ř"  // U+0159: LATIN SMALL LETTER R WITH CARON
+       + "ȑ"  // U+0211: LATIN SMALL LETTER R WITH DOUBLE GRAVE
+       + "ȓ"  // U+0213: LATIN SMALL LETTER R WITH INVERTED BREVE
+       + "ɍ"  // U+024D: LATIN SMALL LETTER R WITH STROKE
+       + "ɼ"  // U+027C: LATIN SMALL LETTER R WITH LONG LEG
+       + "ɽ"  // U+027D: LATIN SMALL LETTER R WITH TAIL
+       + "ɾ"  // U+027E: LATIN SMALL LETTER R WITH FISHHOOK
+       + "ɿ"  // U+027F: LATIN SMALL LETTER REVERSED R WITH FISHHOOK
+       + "ᵣ"  // U+1D63: LATIN SUBSCRIPT SMALL LETTER R
+       + "ᵲ"  // U+1D72: LATIN SMALL LETTER R WITH MIDDLE TILDE
+       + "ᵳ"  // U+1D73: LATIN SMALL LETTER R WITH FISHHOOK AND MIDDLE TILDE
+       + "ᶉ"  // U+1D89: LATIN SMALL LETTER R WITH PALATAL HOOK
+       + "ṙ"  // U+1E59: LATIN SMALL LETTER R WITH DOT ABOVE
+       + "ṛ"  // U+1E5B: LATIN SMALL LETTER R WITH DOT BELOW
+       + "ṝ"  // U+1E5D: LATIN SMALL LETTER R WITH DOT BELOW AND MACRON
+       + "ṟ"  // U+1E5F: LATIN SMALL LETTER R WITH LINE BELOW
+       + "ⓡ"  // U+24E1: CIRCLED LATIN SMALL LETTER R
+       + "ꝛ"  // U+A75B: LATIN SMALL LETTER R ROTUNDA
+       + "ꞃ"  // U+A783: LATIN SMALL LETTER INSULAR R
+       + "r"  // U+FF52: FULLWIDTH LATIN SMALL LETTER R
+      ,"r", // Folded result
+
+       "⒭"  // U+24AD: PARENTHESIZED LATIN SMALL LETTER R
+      ,"(r)", // Folded result
+
+       "Ś"  // U+015A: LATIN CAPITAL LETTER S WITH ACUTE
+       + "Ŝ"  // U+015C: LATIN CAPITAL LETTER S WITH CIRCUMFLEX
+       + "Ş"  // U+015E: LATIN CAPITAL LETTER S WITH CEDILLA
+       + "Š"  // U+0160: LATIN CAPITAL LETTER S WITH CARON
+       + "Ș"  // U+0218: LATIN CAPITAL LETTER S WITH COMMA BELOW
+       + "Ṡ"  // U+1E60: LATIN CAPITAL LETTER S WITH DOT ABOVE
+       + "Ṣ"  // U+1E62: LATIN CAPITAL LETTER S WITH DOT BELOW
+       + "Ṥ"  // U+1E64: LATIN CAPITAL LETTER S WITH ACUTE AND DOT ABOVE
+       + "Ṧ"  // U+1E66: LATIN CAPITAL LETTER S WITH CARON AND DOT ABOVE
+       + "Ṩ"  // U+1E68: LATIN CAPITAL LETTER S WITH DOT BELOW AND DOT ABOVE
+       + "Ⓢ"  // U+24C8: CIRCLED LATIN CAPITAL LETTER S
+       + "ꜱ"  // U+A731: LATIN LETTER SMALL CAPITAL S
+       + "ꞅ"  // U+A785: LATIN SMALL LETTER INSULAR S
+       + "S"  // U+FF33: FULLWIDTH LATIN CAPITAL LETTER S
+      ,"S", // Folded result
+
+       "ś"  // U+015B: LATIN SMALL LETTER S WITH ACUTE
+       + "ŝ"  // U+015D: LATIN SMALL LETTER S WITH CIRCUMFLEX
+       + "ş"  // U+015F: LATIN SMALL LETTER S WITH CEDILLA
+       + "š"  // U+0161: LATIN SMALL LETTER S WITH CARON
+       + "ſ"  // U+017F: LATIN SMALL LETTER LONG S
+       + "ș"  // U+0219: LATIN SMALL LETTER S WITH COMMA BELOW
+       + "ȿ"  // U+023F: LATIN SMALL LETTER S WITH SWASH TAIL
+       + "ʂ"  // U+0282: LATIN SMALL LETTER S WITH HOOK
+       + "ᵴ"  // U+1D74: LATIN SMALL LETTER S WITH MIDDLE TILDE
+       + "ᶊ"  // U+1D8A: LATIN SMALL LETTER S WITH PALATAL HOOK
+       + "ṡ"  // U+1E61: LATIN SMALL LETTER S WITH DOT ABOVE
+       + "ṣ"  // U+1E63: LATIN SMALL LETTER S WITH DOT BELOW
+       + "ṥ"  // U+1E65: LATIN SMALL LETTER S WITH ACUTE AND DOT ABOVE
+       + "ṧ"  // U+1E67: LATIN SMALL LETTER S WITH CARON AND DOT ABOVE
+       + "ṩ"  // U+1E69: LATIN SMALL LETTER S WITH DOT BELOW AND DOT ABOVE
+       + "ẜ"  // U+1E9C: LATIN SMALL LETTER LONG S WITH DIAGONAL STROKE
+       + "ẝ"  // U+1E9D: LATIN SMALL LETTER LONG S WITH HIGH STROKE
+       + "ⓢ"  // U+24E2: CIRCLED LATIN SMALL LETTER S
+       + "Ꞅ"  // U+A784: LATIN CAPITAL LETTER INSULAR S
+       + "s"  // U+FF53: FULLWIDTH LATIN SMALL LETTER S
+      ,"s", // Folded result
+
+       "ẞ"  // U+1E9E: LATIN CAPITAL LETTER SHARP S
+      ,"SS", // Folded result
+
+       "⒮"  // U+24AE: PARENTHESIZED LATIN SMALL LETTER S
+      ,"(s)", // Folded result
+
+       "ß"  // U+00DF: LATIN SMALL LETTER SHARP S
+      ,"ss", // Folded result
+
+       "st"  // U+FB06: LATIN SMALL LIGATURE ST
+      ,"st", // Folded result
+
+       "Ţ"  // U+0162: LATIN CAPITAL LETTER T WITH CEDILLA
+       + "Ť"  // U+0164: LATIN CAPITAL LETTER T WITH CARON
+       + "Ŧ"  // U+0166: LATIN CAPITAL LETTER T WITH STROKE
+       + "Ƭ"  // U+01AC: LATIN CAPITAL LETTER T WITH HOOK
+       + "Ʈ"  // U+01AE: LATIN CAPITAL LETTER T WITH RETROFLEX HOOK
+       + "Ț"  // U+021A: LATIN CAPITAL LETTER T WITH COMMA BELOW
+       + "Ⱦ"  // U+023E: LATIN CAPITAL LETTER T WITH DIAGONAL STROKE
+       + "ᴛ"  // U+1D1B: LATIN LETTER SMALL CAPITAL T
+       + "Ṫ"  // U+1E6A: LATIN CAPITAL LETTER T WITH DOT ABOVE
+       + "Ṭ"  // U+1E6C: LATIN CAPITAL LETTER T WITH DOT BELOW
+       + "Ṯ"  // U+1E6E: LATIN CAPITAL LETTER T WITH LINE BELOW
+       + "Ṱ"  // U+1E70: LATIN CAPITAL LETTER T WITH CIRCUMFLEX BELOW
+       + "Ⓣ"  // U+24C9: CIRCLED LATIN CAPITAL LETTER T
+       + "Ꞇ"  // U+A786: LATIN CAPITAL LETTER INSULAR T
+       + "T"  // U+FF34: FULLWIDTH LATIN CAPITAL LETTER T
+      ,"T", // Folded result
+
+       "ţ"  // U+0163: LATIN SMALL LETTER T WITH CEDILLA
+       + "ť"  // U+0165: LATIN SMALL LETTER T WITH CARON
+       + "ŧ"  // U+0167: LATIN SMALL LETTER T WITH STROKE
+       + "ƫ"  // U+01AB: LATIN SMALL LETTER T WITH PALATAL HOOK
+       + "ƭ"  // U+01AD: LATIN SMALL LETTER T WITH HOOK
+       + "ț"  // U+021B: LATIN SMALL LETTER T WITH COMMA BELOW
+       + "ȶ"  // U+0236: LATIN SMALL LETTER T WITH CURL
+       + "ʇ"  // U+0287: LATIN SMALL LETTER TURNED T
+       + "ʈ"  // U+0288: LATIN SMALL LETTER T WITH RETROFLEX HOOK
+       + "ᵵ"  // U+1D75: LATIN SMALL LETTER T WITH MIDDLE TILDE
+       + "ṫ"  // U+1E6B: LATIN SMALL LETTER T WITH DOT ABOVE
+       + "ṭ"  // U+1E6D: LATIN SMALL LETTER T WITH DOT BELOW
+       + "ṯ"  // U+1E6F: LATIN SMALL LETTER T WITH LINE BELOW
+       + "ṱ"  // U+1E71: LATIN SMALL LETTER T WITH CIRCUMFLEX BELOW
+       + "ẗ"  // U+1E97: LATIN SMALL LETTER T WITH DIAERESIS
+       + "ⓣ"  // U+24E3: CIRCLED LATIN SMALL LETTER T
+       + "ⱦ"  // U+2C66: LATIN SMALL LETTER T WITH DIAGONAL STROKE
+       + "t"  // U+FF54: FULLWIDTH LATIN SMALL LETTER T
+      ,"t", // Folded result
+
+       "Þ"  // U+00DE: LATIN CAPITAL LETTER THORN
+       + "Ꝧ"  // U+A766: LATIN CAPITAL LETTER THORN WITH STROKE THROUGH DESCENDER
+      ,"TH", // Folded result
+
+       "Ꜩ"  // U+A728: LATIN CAPITAL LETTER TZ
+      ,"TZ", // Folded result
+
+       "⒯"  // U+24AF: PARENTHESIZED LATIN SMALL LETTER T
+      ,"(t)", // Folded result
+
+       "ʨ"  // U+02A8: LATIN SMALL LETTER TC DIGRAPH WITH CURL
+      ,"tc", // Folded result
+
+       "þ"  // U+00FE: LATIN SMALL LETTER THORN
+       + "ᵺ"  // U+1D7A: LATIN SMALL LETTER TH WITH STRIKETHROUGH
+       + "ꝧ"  // U+A767: LATIN SMALL LETTER THORN WITH STROKE THROUGH DESCENDER
+      ,"th", // Folded result
+
+       "ʦ"  // U+02A6: LATIN SMALL LETTER TS DIGRAPH
+      ,"ts", // Folded result
+
+       "ꜩ"  // U+A729: LATIN SMALL LETTER TZ
+      ,"tz", // Folded result
+
+       "Ù"  // U+00D9: LATIN CAPITAL LETTER U WITH GRAVE
+       + "Ú"  // U+00DA: LATIN CAPITAL LETTER U WITH ACUTE
+       + "Û"  // U+00DB: LATIN CAPITAL LETTER U WITH CIRCUMFLEX
+       + "Ü"  // U+00DC: LATIN CAPITAL LETTER U WITH DIAERESIS
+       + "Ũ"  // U+0168: LATIN CAPITAL LETTER U WITH TILDE
+       + "Ū"  // U+016A: LATIN CAPITAL LETTER U WITH MACRON
+       + "Ŭ"  // U+016C: LATIN CAPITAL LETTER U WITH BREVE
+       + "Ů"  // U+016E: LATIN CAPITAL LETTER U WITH RING ABOVE
+       + "Ű"  // U+0170: LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
+       + "Ų"  // U+0172: LATIN CAPITAL LETTER U WITH OGONEK
+       + "Ư"  // U+01AF: LATIN CAPITAL LETTER U WITH HORN
+       + "Ǔ"  // U+01D3: LATIN CAPITAL LETTER U WITH CARON
+       + "Ǖ"  // U+01D5: LATIN CAPITAL LETTER U WITH DIAERESIS AND MACRON
+       + "Ǘ"  // U+01D7: LATIN CAPITAL LETTER U WITH DIAERESIS AND ACUTE
+       + "Ǚ"  // U+01D9: LATIN CAPITAL LETTER U WITH DIAERESIS AND CARON
+       + "Ǜ"  // U+01DB: LATIN CAPITAL LETTER U WITH DIAERESIS AND GRAVE
+       + "Ȕ"  // U+0214: LATIN CAPITAL LETTER U WITH DOUBLE GRAVE
+       + "Ȗ"  // U+0216: LATIN CAPITAL LETTER U WITH INVERTED BREVE
+       + "Ʉ"  // U+0244: LATIN CAPITAL LETTER U BAR
+       + "ᴜ"  // U+1D1C: LATIN LETTER SMALL CAPITAL U
+       + "ᵾ"  // U+1D7E: LATIN SMALL CAPITAL LETTER U WITH STROKE
+       + "Ṳ"  // U+1E72: LATIN CAPITAL LETTER U WITH DIAERESIS BELOW
+       + "Ṵ"  // U+1E74: LATIN CAPITAL LETTER U WITH TILDE BELOW
+       + "Ṷ"  // U+1E76: LATIN CAPITAL LETTER U WITH CIRCUMFLEX BELOW
+       + "Ṹ"  // U+1E78: LATIN CAPITAL LETTER U WITH TILDE AND ACUTE
+       + "Ṻ"  // U+1E7A: LATIN CAPITAL LETTER U WITH MACRON AND DIAERESIS
+       + "Ụ"  // U+1EE4: LATIN CAPITAL LETTER U WITH DOT BELOW
+       + "Ủ"  // U+1EE6: LATIN CAPITAL LETTER U WITH HOOK ABOVE
+       + "Ứ"  // U+1EE8: LATIN CAPITAL LETTER U WITH HORN AND ACUTE
+       + "Ừ"  // U+1EEA: LATIN CAPITAL LETTER U WITH HORN AND GRAVE
+       + "Ử"  // U+1EEC: LATIN CAPITAL LETTER U WITH HORN AND HOOK ABOVE
+       + "Ữ"  // U+1EEE: LATIN CAPITAL LETTER U WITH HORN AND TILDE
+       + "Ự"  // U+1EF0: LATIN CAPITAL LETTER U WITH HORN AND DOT BELOW
+       + "Ⓤ"  // U+24CA: CIRCLED LATIN CAPITAL LETTER U
+       + "U"  // U+FF35: FULLWIDTH LATIN CAPITAL LETTER U
+      ,"U", // Folded result
+
+       "ù"  // U+00F9: LATIN SMALL LETTER U WITH GRAVE
+       + "ú"  // U+00FA: LATIN SMALL LETTER U WITH ACUTE
+       + "û"  // U+00FB: LATIN SMALL LETTER U WITH CIRCUMFLEX
+       + "ü"  // U+00FC: LATIN SMALL LETTER U WITH DIAERESIS
+       + "ũ"  // U+0169: LATIN SMALL LETTER U WITH TILDE
+       + "ū"  // U+016B: LATIN SMALL LETTER U WITH MACRON
+       + "ŭ"  // U+016D: LATIN SMALL LETTER U WITH BREVE
+       + "ů"  // U+016F: LATIN SMALL LETTER U WITH RING ABOVE
+       + "ű"  // U+0171: LATIN SMALL LETTER U WITH DOUBLE ACUTE
+       + "ų"  // U+0173: LATIN SMALL LETTER U WITH OGONEK
+       + "ư"  // U+01B0: LATIN SMALL LETTER U WITH HORN
+       + "ǔ"  // U+01D4: LATIN SMALL LETTER U WITH CARON
+       + "ǖ"  // U+01D6: LATIN SMALL LETTER U WITH DIAERESIS AND MACRON
+       + "ǘ"  // U+01D8: LATIN SMALL LETTER U WITH DIAERESIS AND ACUTE
+       + "ǚ"  // U+01DA: LATIN SMALL LETTER U WITH DIAERESIS AND CARON
+       + "ǜ"  // U+01DC: LATIN SMALL LETTER U WITH DIAERESIS AND GRAVE
+       + "ȕ"  // U+0215: LATIN SMALL LETTER U WITH DOUBLE GRAVE
+       + "ȗ"  // U+0217: LATIN SMALL LETTER U WITH INVERTED BREVE
+       + "ʉ"  // U+0289: LATIN SMALL LETTER U BAR
+       + "ᵤ"  // U+1D64: LATIN SUBSCRIPT SMALL LETTER U
+       + "ᶙ"  // U+1D99: LATIN SMALL LETTER U WITH RETROFLEX HOOK
+       + "ṳ"  // U+1E73: LATIN SMALL LETTER U WITH DIAERESIS BELOW
+       + "ṵ"  // U+1E75: LATIN SMALL LETTER U WITH TILDE BELOW
+       + "ṷ"  // U+1E77: LATIN SMALL LETTER U WITH CIRCUMFLEX BELOW
+       + "ṹ"  // U+1E79: LATIN SMALL LETTER U WITH TILDE AND ACUTE
+       + "ṻ"  // U+1E7B: LATIN SMALL LETTER U WITH MACRON AND DIAERESIS
+       + "ụ"  // U+1EE5: LATIN SMALL LETTER U WITH DOT BELOW
+       + "ủ"  // U+1EE7: LATIN SMALL LETTER U WITH HOOK ABOVE
+       + "ứ"  // U+1EE9: LATIN SMALL LETTER U WITH HORN AND ACUTE
+       + "ừ"  // U+1EEB: LATIN SMALL LETTER U WITH HORN AND GRAVE
+       + "ử"  // U+1EED: LATIN SMALL LETTER U WITH HORN AND HOOK ABOVE
+       + "ữ"  // U+1EEF: LATIN SMALL LETTER U WITH HORN AND TILDE
+       + "ự"  // U+1EF1: LATIN SMALL LETTER U WITH HORN AND DOT BELOW
+       + "ⓤ"  // U+24E4: CIRCLED LATIN SMALL LETTER U
+       + "u"  // U+FF55: FULLWIDTH LATIN SMALL LETTER U
+      ,"u", // Folded result
+
+       "⒰"  // U+24B0: PARENTHESIZED LATIN SMALL LETTER U
+      ,"(u)", // Folded result
+
+       "ᵫ"  // U+1D6B: LATIN SMALL LETTER UE
+      ,"ue", // Folded result
+
+       "Ʋ"  // U+01B2: LATIN CAPITAL LETTER V WITH HOOK
+       + "Ʌ"  // U+0245: LATIN CAPITAL LETTER TURNED V
+       + "ᴠ"  // U+1D20: LATIN LETTER SMALL CAPITAL V
+       + "Ṽ"  // U+1E7C: LATIN CAPITAL LETTER V WITH TILDE
+       + "Ṿ"  // U+1E7E: LATIN CAPITAL LETTER V WITH DOT BELOW
+       + "Ỽ"  // U+1EFC: LATIN CAPITAL LETTER MIDDLE-WELSH V
+       + "Ⓥ"  // U+24CB: CIRCLED LATIN CAPITAL LETTER V
+       + "Ꝟ"  // U+A75E: LATIN CAPITAL LETTER V WITH DIAGONAL STROKE
+       + "Ꝩ"  // U+A768: LATIN CAPITAL LETTER VEND
+       + "V"  // U+FF36: FULLWIDTH LATIN CAPITAL LETTER V
+      ,"V", // Folded result
+
+       "ʋ"  // U+028B: LATIN SMALL LETTER V WITH HOOK
+       + "ʌ"  // U+028C: LATIN SMALL LETTER TURNED V
+       + "ᵥ"  // U+1D65: LATIN SUBSCRIPT SMALL LETTER V
+       + "ᶌ"  // U+1D8C: LATIN SMALL LETTER V WITH PALATAL HOOK
+       + "ṽ"  // U+1E7D: LATIN SMALL LETTER V WITH TILDE
+       + "ṿ"  // U+1E7F: LATIN SMALL LETTER V WITH DOT BELOW
+       + "ⓥ"  // U+24E5: CIRCLED LATIN SMALL LETTER V
+       + "ⱱ"  // U+2C71: LATIN SMALL LETTER V WITH RIGHT HOOK
+       + "ⱴ"  // U+2C74: LATIN SMALL LETTER V WITH CURL
+       + "ꝟ"  // U+A75F: LATIN SMALL LETTER V WITH DIAGONAL STROKE
+       + "v"  // U+FF56: FULLWIDTH LATIN SMALL LETTER V
+      ,"v", // Folded result
+
+       "Ꝡ"  // U+A760: LATIN CAPITAL LETTER VY
+      ,"VY", // Folded result
+
+       "⒱"  // U+24B1: PARENTHESIZED LATIN SMALL LETTER V
+      ,"(v)", // Folded result
+
+       "ꝡ"  // U+A761: LATIN SMALL LETTER VY
+      ,"vy", // Folded result
+
+       "Ŵ"  // U+0174: LATIN CAPITAL LETTER W WITH CIRCUMFLEX
+       + "Ƿ"  // U+01F7: LATIN CAPITAL LETTER WYNN
+       + "ᴡ"  // U+1D21: LATIN LETTER SMALL CAPITAL W
+       + "Ẁ"  // U+1E80: LATIN CAPITAL LETTER W WITH GRAVE
+       + "Ẃ"  // U+1E82: LATIN CAPITAL LETTER W WITH ACUTE
+       + "Ẅ"  // U+1E84: LATIN CAPITAL LETTER W WITH DIAERESIS
+       + "Ẇ"  // U+1E86: LATIN CAPITAL LETTER W WITH DOT ABOVE
+       + "Ẉ"  // U+1E88: LATIN CAPITAL LETTER W WITH DOT BELOW
+       + "Ⓦ"  // U+24CC: CIRCLED LATIN CAPITAL LETTER W
+       + "Ⱳ"  // U+2C72: LATIN CAPITAL LETTER W WITH HOOK
+       + "W"  // U+FF37: FULLWIDTH LATIN CAPITAL LETTER W
+      ,"W", // Folded result
+
+       "ŵ"  // U+0175: LATIN SMALL LETTER W WITH CIRCUMFLEX
+       + "ƿ"  // U+01BF: LATIN LETTER WYNN
+       + "ʍ"  // U+028D: LATIN SMALL LETTER TURNED W
+       + "ẁ"  // U+1E81: LATIN SMALL LETTER W WITH GRAVE
+       + "ẃ"  // U+1E83: LATIN SMALL LETTER W WITH ACUTE
+       + "ẅ"  // U+1E85: LATIN SMALL LETTER W WITH DIAERESIS
+       + "ẇ"  // U+1E87: LATIN SMALL LETTER W WITH DOT ABOVE
+       + "ẉ"  // U+1E89: LATIN SMALL LETTER W WITH DOT BELOW
+       + "ẘ"  // U+1E98: LATIN SMALL LETTER W WITH RING ABOVE
+       + "ⓦ"  // U+24E6: CIRCLED LATIN SMALL LETTER W
+       + "ⱳ"  // U+2C73: LATIN SMALL LETTER W WITH HOOK
+       + "w"  // U+FF57: FULLWIDTH LATIN SMALL LETTER W
+      ,"w", // Folded result
+
+       "⒲"  // U+24B2: PARENTHESIZED LATIN SMALL LETTER W
+      ,"(w)", // Folded result
+
+       "Ẋ"  // U+1E8A: LATIN CAPITAL LETTER X WITH DOT ABOVE
+       + "Ẍ"  // U+1E8C: LATIN CAPITAL LETTER X WITH DIAERESIS
+       + "Ⓧ"  // U+24CD: CIRCLED LATIN CAPITAL LETTER X
+       + "X"  // U+FF38: FULLWIDTH LATIN CAPITAL LETTER X
+      ,"X", // Folded result
+
+       "ᶍ"  // U+1D8D: LATIN SMALL LETTER X WITH PALATAL HOOK
+       + "ẋ"  // U+1E8B: LATIN SMALL LETTER X WITH DOT ABOVE
+       + "ẍ"  // U+1E8D: LATIN SMALL LETTER X WITH DIAERESIS
+       + "ₓ"  // U+2093: LATIN SUBSCRIPT SMALL LETTER X
+       + "ⓧ"  // U+24E7: CIRCLED LATIN SMALL LETTER X
+       + "x"  // U+FF58: FULLWIDTH LATIN SMALL LETTER X
+      ,"x", // Folded result
+
+       "⒳"  // U+24B3: PARENTHESIZED LATIN SMALL LETTER X
+      ,"(x)", // Folded result
+
+       "Ý"  // U+00DD: LATIN CAPITAL LETTER Y WITH ACUTE
+       + "Ŷ"  // U+0176: LATIN CAPITAL LETTER Y WITH CIRCUMFLEX
+       + "Ÿ"  // U+0178: LATIN CAPITAL LETTER Y WITH DIAERESIS
+       + "Ƴ"  // U+01B3: LATIN CAPITAL LETTER Y WITH HOOK
+       + "Ȳ"  // U+0232: LATIN CAPITAL LETTER Y WITH MACRON
+       + "Ɏ"  // U+024E: LATIN CAPITAL LETTER Y WITH STROKE
+       + "ʏ"  // U+028F: LATIN LETTER SMALL CAPITAL Y
+       + "Ẏ"  // U+1E8E: LATIN CAPITAL LETTER Y WITH DOT ABOVE
+       + "Ỳ"  // U+1EF2: LATIN CAPITAL LETTER Y WITH GRAVE
+       + "Ỵ"  // U+1EF4: LATIN CAPITAL LETTER Y WITH DOT BELOW
+       + "Ỷ"  // U+1EF6: LATIN CAPITAL LETTER Y WITH HOOK ABOVE
+       + "Ỹ"  // U+1EF8: LATIN CAPITAL LETTER Y WITH TILDE
+       + "Ỿ"  // U+1EFE: LATIN CAPITAL LETTER Y WITH LOOP
+       + "Ⓨ"  // U+24CE: CIRCLED LATIN CAPITAL LETTER Y
+       + "Y"  // U+FF39: FULLWIDTH LATIN CAPITAL LETTER Y
+      ,"Y", // Folded result
+
+       "ý"  // U+00FD: LATIN SMALL LETTER Y WITH ACUTE
+       + "ÿ"  // U+00FF: LATIN SMALL LETTER Y WITH DIAERESIS
+       + "ŷ"  // U+0177: LATIN SMALL LETTER Y WITH CIRCUMFLEX
+       + "ƴ"  // U+01B4: LATIN SMALL LETTER Y WITH HOOK
+       + "ȳ"  // U+0233: LATIN SMALL LETTER Y WITH MACRON
+       + "ɏ"  // U+024F: LATIN SMALL LETTER Y WITH STROKE
+       + "ʎ"  // U+028E: LATIN SMALL LETTER TURNED Y
+       + "ẏ"  // U+1E8F: LATIN SMALL LETTER Y WITH DOT ABOVE
+       + "ẙ"  // U+1E99: LATIN SMALL LETTER Y WITH RING ABOVE
+       + "ỳ"  // U+1EF3: LATIN SMALL LETTER Y WITH GRAVE
+       + "ỵ"  // U+1EF5: LATIN SMALL LETTER Y WITH DOT BELOW
+       + "ỷ"  // U+1EF7: LATIN SMALL LETTER Y WITH HOOK ABOVE
+       + "ỹ"  // U+1EF9: LATIN SMALL LETTER Y WITH TILDE
+       + "ỿ"  // U+1EFF: LATIN SMALL LETTER Y WITH LOOP
+       + "ⓨ"  // U+24E8: CIRCLED LATIN SMALL LETTER Y
+       + "y"  // U+FF59: FULLWIDTH LATIN SMALL LETTER Y
+      ,"y", // Folded result
+
+       "⒴"  // U+24B4: PARENTHESIZED LATIN SMALL LETTER Y
+      ,"(y)", // Folded result
+
+       "Ź"  // U+0179: LATIN CAPITAL LETTER Z WITH ACUTE
+       + "Ż"  // U+017B: LATIN CAPITAL LETTER Z WITH DOT ABOVE
+       + "Ž"  // U+017D: LATIN CAPITAL LETTER Z WITH CARON
+       + "Ƶ"  // U+01B5: LATIN CAPITAL LETTER Z WITH STROKE
+       + "Ȝ"  // U+021C: LATIN CAPITAL LETTER YOGH
+       + "Ȥ"  // U+0224: LATIN CAPITAL LETTER Z WITH HOOK
+       + "ᴢ"  // U+1D22: LATIN LETTER SMALL CAPITAL Z
+       + "Ẑ"  // U+1E90: LATIN CAPITAL LETTER Z WITH CIRCUMFLEX
+       + "Ẓ"  // U+1E92: LATIN CAPITAL LETTER Z WITH DOT BELOW
+       + "Ẕ"  // U+1E94: LATIN CAPITAL LETTER Z WITH LINE BELOW
+       + "Ⓩ"  // U+24CF: CIRCLED LATIN CAPITAL LETTER Z
+       + "Ⱬ"  // U+2C6B: LATIN CAPITAL LETTER Z WITH DESCENDER
+       + "Ꝣ"  // U+A762: LATIN CAPITAL LETTER VISIGOTHIC Z
+       + "Z"  // U+FF3A: FULLWIDTH LATIN CAPITAL LETTER Z
+      ,"Z", // Folded result
+
+       "ź"  // U+017A: LATIN SMALL LETTER Z WITH ACUTE
+       + "ż"  // U+017C: LATIN SMALL LETTER Z WITH DOT ABOVE
+       + "ž"  // U+017E: LATIN SMALL LETTER Z WITH CARON
+       + "ƶ"  // U+01B6: LATIN SMALL LETTER Z WITH STROKE
+       + "ȝ"  // U+021D: LATIN SMALL LETTER YOGH
+       + "ȥ"  // U+0225: LATIN SMALL LETTER Z WITH HOOK
+       + "ɀ"  // U+0240: LATIN SMALL LETTER Z WITH SWASH TAIL
+       + "ʐ"  // U+0290: LATIN SMALL LETTER Z WITH RETROFLEX HOOK
+       + "ʑ"  // U+0291: LATIN SMALL LETTER Z WITH CURL
+       + "ᵶ"  // U+1D76: LATIN SMALL LETTER Z WITH MIDDLE TILDE
+       + "ᶎ"  // U+1D8E: LATIN SMALL LETTER Z WITH PALATAL HOOK
+       + "ẑ"  // U+1E91: LATIN SMALL LETTER Z WITH CIRCUMFLEX
+       + "ẓ"  // U+1E93: LATIN SMALL LETTER Z WITH DOT BELOW
+       + "ẕ"  // U+1E95: LATIN SMALL LETTER Z WITH LINE BELOW
+       + "ⓩ"  // U+24E9: CIRCLED LATIN SMALL LETTER Z
+       + "ⱬ"  // U+2C6C: LATIN SMALL LETTER Z WITH DESCENDER
+       + "ꝣ"  // U+A763: LATIN SMALL LETTER VISIGOTHIC Z
+       + "z"  // U+FF5A: FULLWIDTH LATIN SMALL LETTER Z
+      ,"z", // Folded result
+
+       "⒵"  // U+24B5: PARENTHESIZED LATIN SMALL LETTER Z
+      ,"(z)", // Folded result
+
+       "⁰"  // U+2070: SUPERSCRIPT ZERO
+       + "₀"  // U+2080: SUBSCRIPT ZERO
+       + "⓪"  // U+24EA: CIRCLED DIGIT ZERO
+       + "⓿"  // U+24FF: NEGATIVE CIRCLED DIGIT ZERO
+       + "0"  // U+FF10: FULLWIDTH DIGIT ZERO
+      ,"0", // Folded result
+
+       "¹"  // U+00B9: SUPERSCRIPT ONE
+       + "₁"  // U+2081: SUBSCRIPT ONE
+       + "①"  // U+2460: CIRCLED DIGIT ONE
+       + "⓵"  // U+24F5: DOUBLE CIRCLED DIGIT ONE
+       + "❶"  // U+2776: DINGBAT NEGATIVE CIRCLED DIGIT ONE
+       + "➀"  // U+2780: DINGBAT CIRCLED SANS-SERIF DIGIT ONE
+       + "➊"  // U+278A: DINGBAT NEGATIVE CIRCLED SANS-SERIF DIGIT ONE
+       + "1"  // U+FF11: FULLWIDTH DIGIT ONE
+      ,"1", // Folded result
+
+       "⒈"  // U+2488: DIGIT ONE FULL STOP
+      ,"1.", // Folded result
+
+       "⑴"  // U+2474: PARENTHESIZED DIGIT ONE
+      ,"(1)", // Folded result
+
+       "²"  // U+00B2: SUPERSCRIPT TWO
+       + "₂"  // U+2082: SUBSCRIPT TWO
+       + "②"  // U+2461: CIRCLED DIGIT TWO
+       + "⓶"  // U+24F6: DOUBLE CIRCLED DIGIT TWO
+       + "❷"  // U+2777: DINGBAT NEGATIVE CIRCLED DIGIT TWO
+       + "➁"  // U+2781: DINGBAT CIRCLED SANS-SERIF DIGIT TWO
+       + "➋"  // U+278B: DINGBAT NEGATIVE CIRCLED SANS-SERIF DIGIT TWO
+       + "2"  // U+FF12: FULLWIDTH DIGIT TWO
+      ,"2", // Folded result
+
+       "⒉"  // U+2489: DIGIT TWO FULL STOP
+      ,"2.", // Folded result
+
+       "⑵"  // U+2475: PARENTHESIZED DIGIT TWO
+      ,"(2)", // Folded result
+
+       "³"  // U+00B3: SUPERSCRIPT THREE
+       + "₃"  // U+2083: SUBSCRIPT THREE
+       + "③"  // U+2462: CIRCLED DIGIT THREE
+       + "⓷"  // U+24F7: DOUBLE CIRCLED DIGIT THREE
+       + "❸"  // U+2778: DINGBAT NEGATIVE CIRCLED DIGIT THREE
+       + "➂"  // U+2782: DINGBAT CIRCLED SANS-SERIF DIGIT THREE
+       + "➌"  // U+278C: DINGBAT NEGATIVE CIRCLED SANS-SERIF DIGIT THREE
+       + "3"  // U+FF13: FULLWIDTH DIGIT THREE
+      ,"3", // Folded result
+
+       "⒊"  // U+248A: DIGIT THREE FULL STOP
+      ,"3.", // Folded result
+
+       "⑶"  // U+2476: PARENTHESIZED DIGIT THREE
+      ,"(3)", // Folded result
+
+       "⁴"  // U+2074: SUPERSCRIPT FOUR
+       + "₄"  // U+2084: SUBSCRIPT FOUR
+       + "④"  // U+2463: CIRCLED DIGIT FOUR
+       + "⓸"  // U+24F8: DOUBLE CIRCLED DIGIT FOUR
+       + "❹"  // U+2779: DINGBAT NEGATIVE CIRCLED DIGIT FOUR
+       + "➃"  // U+2783: DINGBAT CIRCLED SANS-SERIF DIGIT FOUR
+       + "➍"  // U+278D: DINGBAT NEGATIVE CIRCLED SANS-SERIF DIGIT FOUR
+       + "4"  // U+FF14: FULLWIDTH DIGIT FOUR
+      ,"4", // Folded result
+
+       "⒋"  // U+248B: DIGIT FOUR FULL STOP
+      ,"4.", // Folded result
+
+       "⑷"  // U+2477: PARENTHESIZED DIGIT FOUR
+      ,"(4)", // Folded result
+
+       "⁵"  // U+2075: SUPERSCRIPT FIVE
+       + "₅"  // U+2085: SUBSCRIPT FIVE
+       + "⑤"  // U+2464: CIRCLED DIGIT FIVE
+       + "⓹"  // U+24F9: DOUBLE CIRCLED DIGIT FIVE
+       + "❺"  // U+277A: DINGBAT NEGATIVE CIRCLED DIGIT FIVE
+       + "➄"  // U+2784: DINGBAT CIRCLED SANS-SERIF DIGIT FIVE
+       + "➎"  // U+278E: DINGBAT NEGATIVE CIRCLED SANS-SERIF DIGIT FIVE
+       + "5"  // U+FF15: FULLWIDTH DIGIT FIVE
+      ,"5", // Folded result
+
+       "⒌"  // U+248C: DIGIT FIVE FULL STOP
+      ,"5.", // Folded result
+
+       "⑸"  // U+2478: PARENTHESIZED DIGIT FIVE
+      ,"(5)", // Folded result
+
+       "⁶"  // U+2076: SUPERSCRIPT SIX
+       + "₆"  // U+2086: SUBSCRIPT SIX
+       + "⑥"  // U+2465: CIRCLED DIGIT SIX
+       + "⓺"  // U+24FA: DOUBLE CIRCLED DIGIT SIX
+       + "❻"  // U+277B: DINGBAT NEGATIVE CIRCLED DIGIT SIX
+       + "➅"  // U+2785: DINGBAT CIRCLED SANS-SERIF DIGIT SIX
+       + "➏"  // U+278F: DINGBAT NEGATIVE CIRCLED SANS-SERIF DIGIT SIX
+       + "6"  // U+FF16: FULLWIDTH DIGIT SIX
+      ,"6", // Folded result
+
+       "⒍"  // U+248D: DIGIT SIX FULL STOP
+      ,"6.", // Folded result
+
+       "⑹"  // U+2479: PARENTHESIZED DIGIT SIX
+      ,"(6)", // Folded result
+
+       "⁷"  // U+2077: SUPERSCRIPT SEVEN
+       + "₇"  // U+2087: SUBSCRIPT SEVEN
+       + "⑦"  // U+2466: CIRCLED DIGIT SEVEN
+       + "⓻"  // U+24FB: DOUBLE CIRCLED DIGIT SEVEN
+       + "❼"  // U+277C: DINGBAT NEGATIVE CIRCLED DIGIT SEVEN
+       + "➆"  // U+2786: DINGBAT CIRCLED SANS-SERIF DIGIT SEVEN
+       + "➐"  // U+2790: DINGBAT NEGATIVE CIRCLED SANS-SERIF DIGIT SEVEN
+       + "7"  // U+FF17: FULLWIDTH DIGIT SEVEN
+      ,"7", // Folded result
+
+       "⒎"  // U+248E: DIGIT SEVEN FULL STOP
+      ,"7.", // Folded result
+
+       "⑺"  // U+247A: PARENTHESIZED DIGIT SEVEN
+      ,"(7)", // Folded result
+
+       "⁸"  // U+2078: SUPERSCRIPT EIGHT
+       + "₈"  // U+2088: SUBSCRIPT EIGHT
+       + "⑧"  // U+2467: CIRCLED DIGIT EIGHT
+       + "⓼"  // U+24FC: DOUBLE CIRCLED DIGIT EIGHT
+       + "❽"  // U+277D: DINGBAT NEGATIVE CIRCLED DIGIT EIGHT
+       + "➇"  // U+2787: DINGBAT CIRCLED SANS-SERIF DIGIT EIGHT
+       + "➑"  // U+2791: DINGBAT NEGATIVE CIRCLED SANS-SERIF DIGIT EIGHT
+       + "8"  // U+FF18: FULLWIDTH DIGIT EIGHT
+      ,"8", // Folded result
+
+       "⒏"  // U+248F: DIGIT EIGHT FULL STOP
+      ,"8.", // Folded result
+
+       "⑻"  // U+247B: PARENTHESIZED DIGIT EIGHT
+      ,"(8)", // Folded result
+
+       "⁹"  // U+2079: SUPERSCRIPT NINE
+       + "₉"  // U+2089: SUBSCRIPT NINE
+       + "⑨"  // U+2468: CIRCLED DIGIT NINE
+       + "⓽"  // U+24FD: DOUBLE CIRCLED DIGIT NINE
+       + "❾"  // U+277E: DINGBAT NEGATIVE CIRCLED DIGIT NINE
+       + "➈"  // U+2788: DINGBAT CIRCLED SANS-SERIF DIGIT NINE
+       + "➒"  // U+2792: DINGBAT NEGATIVE CIRCLED SANS-SERIF DIGIT NINE
+       + "9"  // U+FF19: FULLWIDTH DIGIT NINE
+      ,"9", // Folded result
+
+       "⒐"  // U+2490: DIGIT NINE FULL STOP
+      ,"9.", // Folded result
+
+       "⑼"  // U+247C: PARENTHESIZED DIGIT NINE
+      ,"(9)", // Folded result
+
+       "⑩"  // U+2469: CIRCLED NUMBER TEN
+       + "⓾"  // U+24FE: DOUBLE CIRCLED NUMBER TEN
+       + "❿"  // U+277F: DINGBAT NEGATIVE CIRCLED NUMBER TEN
+       + "➉"  // U+2789: DINGBAT CIRCLED SANS-SERIF NUMBER TEN
+       + "➓"  // U+2793: DINGBAT NEGATIVE CIRCLED SANS-SERIF NUMBER TEN
+      ,"10", // Folded result
+
+       "⒑"  // U+2491: NUMBER TEN FULL STOP
+      ,"10.", // Folded result
+
+       "⑽"  // U+247D: PARENTHESIZED NUMBER TEN
+      ,"(10)", // Folded result
+
+       "⑪"  // U+246A: CIRCLED NUMBER ELEVEN
+       + "⓫"  // U+24EB: NEGATIVE CIRCLED NUMBER ELEVEN
+      ,"11", // Folded result
+
+       "⒒"  // U+2492: NUMBER ELEVEN FULL STOP
+      ,"11.", // Folded result
+
+       "⑾"  // U+247E: PARENTHESIZED NUMBER ELEVEN
+      ,"(11)", // Folded result
+
+       "⑫"  // U+246B: CIRCLED NUMBER TWELVE
+       + "⓬"  // U+24EC: NEGATIVE CIRCLED NUMBER TWELVE
+      ,"12", // Folded result
+
+       "⒓"  // U+2493: NUMBER TWELVE FULL STOP
+      ,"12.", // Folded result
+
+       "⑿"  // U+247F: PARENTHESIZED NUMBER TWELVE
+      ,"(12)", // Folded result
+
+       "⑬"  // U+246C: CIRCLED NUMBER THIRTEEN
+       + "⓭"  // U+24ED: NEGATIVE CIRCLED NUMBER THIRTEEN
+      ,"13", // Folded result
+
+       "⒔"  // U+2494: NUMBER THIRTEEN FULL STOP
+      ,"13.", // Folded result
+
+       "⒀"  // U+2480: PARENTHESIZED NUMBER THIRTEEN
+      ,"(13)", // Folded result
+
+       "⑭"  // U+246D: CIRCLED NUMBER FOURTEEN
+       + "⓮"  // U+24EE: NEGATIVE CIRCLED NUMBER FOURTEEN
+      ,"14", // Folded result
+
+       "⒕"  // U+2495: NUMBER FOURTEEN FULL STOP
+      ,"14.", // Folded result
+
+       "⒁"  // U+2481: PARENTHESIZED NUMBER FOURTEEN
+      ,"(14)", // Folded result
+
+       "⑮"  // U+246E: CIRCLED NUMBER FIFTEEN
+       + "⓯"  // U+24EF: NEGATIVE CIRCLED NUMBER FIFTEEN
+      ,"15", // Folded result
+
+       "⒖"  // U+2496: NUMBER FIFTEEN FULL STOP
+      ,"15.", // Folded result
+
+       "⒂"  // U+2482: PARENTHESIZED NUMBER FIFTEEN
+      ,"(15)", // Folded result
+
+       "⑯"  // U+246F: CIRCLED NUMBER SIXTEEN
+       + "⓰"  // U+24F0: NEGATIVE CIRCLED NUMBER SIXTEEN
+      ,"16", // Folded result
+
+       "⒗"  // U+2497: NUMBER SIXTEEN FULL STOP
+      ,"16.", // Folded result
+
+       "⒃"  // U+2483: PARENTHESIZED NUMBER SIXTEEN
+      ,"(16)", // Folded result
+
+       "⑰"  // U+2470: CIRCLED NUMBER SEVENTEEN
+       + "⓱"  // U+24F1: NEGATIVE CIRCLED NUMBER SEVENTEEN
+      ,"17", // Folded result
+
+       "⒘"  // U+2498: NUMBER SEVENTEEN FULL STOP
+      ,"17.", // Folded result
+
+       "⒄"  // U+2484: PARENTHESIZED NUMBER SEVENTEEN
+      ,"(17)", // Folded result
+
+       "⑱"  // U+2471: CIRCLED NUMBER EIGHTEEN
+       + "⓲"  // U+24F2: NEGATIVE CIRCLED NUMBER EIGHTEEN
+      ,"18", // Folded result
+
+       "⒙"  // U+2499: NUMBER EIGHTEEN FULL STOP
+      ,"18.", // Folded result
+
+       "⒅"  // U+2485: PARENTHESIZED NUMBER EIGHTEEN
+      ,"(18)", // Folded result
+
+       "⑲"  // U+2472: CIRCLED NUMBER NINETEEN
+       + "⓳"  // U+24F3: NEGATIVE CIRCLED NUMBER NINETEEN
+      ,"19", // Folded result
+
+       "⒚"  // U+249A: NUMBER NINETEEN FULL STOP
+      ,"19.", // Folded result
+
+       "⒆"  // U+2486: PARENTHESIZED NUMBER NINETEEN
+      ,"(19)", // Folded result
+
+       "⑳"  // U+2473: CIRCLED NUMBER TWENTY
+       + "⓴"  // U+24F4: NEGATIVE CIRCLED NUMBER TWENTY
+      ,"20", // Folded result
+
+       "⒛"  // U+249B: NUMBER TWENTY FULL STOP
+      ,"20.", // Folded result
+
+       "⒇"  // U+2487: PARENTHESIZED NUMBER TWENTY
+      ,"(20)", // Folded result
+
+       "«"  // U+00AB: LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+       + "»"  // U+00BB: RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+       + "“"  // U+201C: LEFT DOUBLE QUOTATION MARK
+       + "”"  // U+201D: RIGHT DOUBLE QUOTATION MARK
+       + "„"  // U+201E: DOUBLE LOW-9 QUOTATION MARK
+       + "″"  // U+2033: DOUBLE PRIME
+       + "‶"  // U+2036: REVERSED DOUBLE PRIME
+       + "❝"  // U+275D: HEAVY DOUBLE TURNED COMMA QUOTATION MARK ORNAMENT
+       + "❞"  // U+275E: HEAVY DOUBLE COMMA QUOTATION MARK ORNAMENT
+       + "❮"  // U+276E: HEAVY LEFT-POINTING ANGLE QUOTATION MARK ORNAMENT
+       + "❯"  // U+276F: HEAVY RIGHT-POINTING ANGLE QUOTATION MARK ORNAMENT
+       + """  // U+FF02: FULLWIDTH QUOTATION MARK
+      ,"\"", // Folded result
+
+       "‘"  // U+2018: LEFT SINGLE QUOTATION MARK
+       + "’"  // U+2019: RIGHT SINGLE QUOTATION MARK
+       + "‚"  // U+201A: SINGLE LOW-9 QUOTATION MARK
+       + "‛"  // U+201B: SINGLE HIGH-REVERSED-9 QUOTATION MARK
+       + "′"  // U+2032: PRIME
+       + "‵"  // U+2035: REVERSED PRIME
+       + "‹"  // U+2039: SINGLE LEFT-POINTING ANGLE QUOTATION MARK
+       + "›"  // U+203A: SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
+       + "❛"  // U+275B: HEAVY SINGLE TURNED COMMA QUOTATION MARK ORNAMENT
+       + "❜"  // U+275C: HEAVY SINGLE COMMA QUOTATION MARK ORNAMENT
+       + "'"  // U+FF07: FULLWIDTH APOSTROPHE
+      ,"'", // Folded result
+
+       "‐"  // U+2010: HYPHEN
+       + "‑"  // U+2011: NON-BREAKING HYPHEN
+       + "‒"  // U+2012: FIGURE DASH
+       + "–"  // U+2013: EN DASH
+       + "—"  // U+2014: EM DASH
+       + "⁻"  // U+207B: SUPERSCRIPT MINUS
+       + "₋"  // U+208B: SUBSCRIPT MINUS
+       + "-"  // U+FF0D: FULLWIDTH HYPHEN-MINUS
+      ,"-", // Folded result
+
+       "⁅"  // U+2045: LEFT SQUARE BRACKET WITH QUILL
+       + "❲"  // U+2772: LIGHT LEFT TORTOISE SHELL BRACKET ORNAMENT
+       + "["  // U+FF3B: FULLWIDTH LEFT SQUARE BRACKET
+      ,"[", // Folded result
+
+       "⁆"  // U+2046: RIGHT SQUARE BRACKET WITH QUILL
+       + "❳"  // U+2773: LIGHT RIGHT TORTOISE SHELL BRACKET ORNAMENT
+       + "]"  // U+FF3D: FULLWIDTH RIGHT SQUARE BRACKET
+      ,"]", // Folded result
+
+       "⁽"  // U+207D: SUPERSCRIPT LEFT PARENTHESIS
+       + "₍"  // U+208D: SUBSCRIPT LEFT PARENTHESIS
+       + "❨"  // U+2768: MEDIUM LEFT PARENTHESIS ORNAMENT
+       + "❪"  // U+276A: MEDIUM FLATTENED LEFT PARENTHESIS ORNAMENT
+       + "("  // U+FF08: FULLWIDTH LEFT PARENTHESIS
+      ,"(", // Folded result
+
+       "⸨"  // U+2E28: LEFT DOUBLE PARENTHESIS
+      ,"((", // Folded result
+
+       "⁾"  // U+207E: SUPERSCRIPT RIGHT PARENTHESIS
+       + "₎"  // U+208E: SUBSCRIPT RIGHT PARENTHESIS
+       + "❩"  // U+2769: MEDIUM RIGHT PARENTHESIS ORNAMENT
+       + "❫"  // U+276B: MEDIUM FLATTENED RIGHT PARENTHESIS ORNAMENT
+       + ")"  // U+FF09: FULLWIDTH RIGHT PARENTHESIS
+      ,")", // Folded result
+
+       "⸩"  // U+2E29: RIGHT DOUBLE PARENTHESIS
+      ,"))", // Folded result
+
+       "❬"  // U+276C: MEDIUM LEFT-POINTING ANGLE BRACKET ORNAMENT
+       + "❰"  // U+2770: HEAVY LEFT-POINTING ANGLE BRACKET ORNAMENT
+       + "<"  // U+FF1C: FULLWIDTH LESS-THAN SIGN
+      ,"<", // Folded result
+
+       "❭"  // U+276D: MEDIUM RIGHT-POINTING ANGLE BRACKET ORNAMENT
+       + "❱"  // U+2771: HEAVY RIGHT-POINTING ANGLE BRACKET ORNAMENT
+       + ">"  // U+FF1E: FULLWIDTH GREATER-THAN SIGN
+      ,">", // Folded result
+
+       "❴"  // U+2774: MEDIUM LEFT CURLY BRACKET ORNAMENT
+       + "{"  // U+FF5B: FULLWIDTH LEFT CURLY BRACKET
+      ,"{", // Folded result
+
+       "❵"  // U+2775: MEDIUM RIGHT CURLY BRACKET ORNAMENT
+       + "}"  // U+FF5D: FULLWIDTH RIGHT CURLY BRACKET
+      ,"}", // Folded result
+
+       "⁺"  // U+207A: SUPERSCRIPT PLUS SIGN
+       + "₊"  // U+208A: SUBSCRIPT PLUS SIGN
+       + "+"  // U+FF0B: FULLWIDTH PLUS SIGN
+      ,"+", // Folded result
+
+       "⁼"  // U+207C: SUPERSCRIPT EQUALS SIGN
+       + "₌"  // U+208C: SUBSCRIPT EQUALS SIGN
+       + "="  // U+FF1D: FULLWIDTH EQUALS SIGN
+      ,"=", // Folded result
+
+       "!"  // U+FF01: FULLWIDTH EXCLAMATION MARK
+      ,"!", // Folded result
+
+       "‼"  // U+203C: DOUBLE EXCLAMATION MARK
+      ,"!!", // Folded result
+
+       "⁉"  // U+2049: EXCLAMATION QUESTION MARK
+      ,"!?", // Folded result
+
+       "#"  // U+FF03: FULLWIDTH NUMBER SIGN
+      ,"#", // Folded result
+
+       "$"  // U+FF04: FULLWIDTH DOLLAR SIGN
+      ,"$", // Folded result
+
+       "⁒"  // U+2052: COMMERCIAL MINUS SIGN
+       + "%"  // U+FF05: FULLWIDTH PERCENT SIGN
+      ,"%", // Folded result
+
+       "&"  // U+FF06: FULLWIDTH AMPERSAND
+      ,"&", // Folded result
+
+       "⁎"  // U+204E: LOW ASTERISK
+       + "*"  // U+FF0A: FULLWIDTH ASTERISK
+      ,"*", // Folded result
+
+       ","  // U+FF0C: FULLWIDTH COMMA
+      ,",", // Folded result
+
+       "."  // U+FF0E: FULLWIDTH FULL STOP
+      ,".", // Folded result
+
+       "⁄"  // U+2044: FRACTION SLASH
+       + "/"  // U+FF0F: FULLWIDTH SOLIDUS
+      ,"/", // Folded result
+
+       ":"  // U+FF1A: FULLWIDTH COLON
+      ,":", // Folded result
+
+       "⁏"  // U+204F: REVERSED SEMICOLON
+       + ";"  // U+FF1B: FULLWIDTH SEMICOLON
+      ,";", // Folded result
+
+       "?"  // U+FF1F: FULLWIDTH QUESTION MARK
+      ,"?", // Folded result
+
+       "⁇"  // U+2047: DOUBLE QUESTION MARK
+      ,"??", // Folded result
+
+       "⁈"  // U+2048: QUESTION EXCLAMATION MARK
+      ,"?!", // Folded result
+
+       "@"  // U+FF20: FULLWIDTH COMMERCIAL AT
+      ,"@", // Folded result
+
+       "\"  // U+FF3C: FULLWIDTH REVERSE SOLIDUS
+      ,"\\", // Folded result
+
+       "‸"  // U+2038: CARET
+       + "^"  // U+FF3E: FULLWIDTH CIRCUMFLEX ACCENT
+      ,"^", // Folded result
+
+       "_"  // U+FF3F: FULLWIDTH LOW LINE
+      ,"_", // Folded result
+
+       "⁓"  // U+2053: SWUNG DASH
+       + "~"  // U+FF5E: FULLWIDTH TILDE
+      ,"~", // Folded result
+    };
+
+    // Construct input text and expected output tokens
+    List<String> expectedOutputTokens = new ArrayList<String>();
+    StringBuilder inputText = new StringBuilder();
+    for (int n = 0 ; n < foldings.length ; n += 2) {
+      if (n > 0) {
+        inputText.append(' ');  // Space between tokens
+      }
+      inputText.append(foldings[n]);
+
+      // Construct the expected output token: the ASCII string to fold to,
+      // duplicated as many times as the number of characters in the input text.
+      StringBuilder expected = new StringBuilder();
+      int numChars = foldings[n].length();
+      for (int m = 0 ; m < numChars; ++m) {
+        expected.append(foldings[n + 1]);
+      }
+      expectedOutputTokens.add(expected.toString());
+    }
+
+    TokenStream stream = new MockTokenizer(new StringReader(inputText.toString()), MockTokenizer.WHITESPACE, false);
+    ASCIIFoldingFilter filter = new ASCIIFoldingFilter(stream);
+    CharTermAttribute termAtt = filter.getAttribute(CharTermAttribute.class);
+    Iterator<String> expectedIter = expectedOutputTokens.iterator();
+    filter.reset();
+    while (expectedIter.hasNext()) {
+      assertTermEquals(expectedIter.next(), filter, termAtt);
+    }
+    assertFalse(filter.incrementToken());
+  }
+  
+  void assertTermEquals(String expected, TokenStream stream, CharTermAttribute termAtt) throws Exception {
+    assertTrue(stream.incrementToken());
+    assertEquals(expected, termAtt.toString());
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/analysis/TestAnalyzers.java b/lucene/backwards/src/test/org/apache/lucene/analysis/TestAnalyzers.java
new file mode 100644
index 0000000..c97da67
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/analysis/TestAnalyzers.java
@@ -0,0 +1,251 @@
+package org.apache.lucene.analysis;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.io.StringReader;
+import java.io.Reader;
+
+import org.apache.lucene.analysis.standard.StandardTokenizer;
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.index.Payload;
+
+public class TestAnalyzers extends BaseTokenStreamTestCase {
+
+  public void testSimple() throws Exception {
+    Analyzer a = new SimpleAnalyzer(TEST_VERSION_CURRENT);
+    assertAnalyzesTo(a, "foo bar FOO BAR", 
+                     new String[] { "foo", "bar", "foo", "bar" });
+    assertAnalyzesTo(a, "foo      bar .  FOO <> BAR", 
+                     new String[] { "foo", "bar", "foo", "bar" });
+    assertAnalyzesTo(a, "foo.bar.FOO.BAR", 
+                     new String[] { "foo", "bar", "foo", "bar" });
+    assertAnalyzesTo(a, "U.S.A.", 
+                     new String[] { "u", "s", "a" });
+    assertAnalyzesTo(a, "C++", 
+                     new String[] { "c" });
+    assertAnalyzesTo(a, "B2B", 
+                     new String[] { "b", "b" });
+    assertAnalyzesTo(a, "2B", 
+                     new String[] { "b" });
+    assertAnalyzesTo(a, "\"QUOTED\" word", 
+                     new String[] { "quoted", "word" });
+  }
+
+  public void testNull() throws Exception {
+    Analyzer a = new WhitespaceAnalyzer(TEST_VERSION_CURRENT);
+    assertAnalyzesTo(a, "foo bar FOO BAR", 
+                     new String[] { "foo", "bar", "FOO", "BAR" });
+    assertAnalyzesTo(a, "foo      bar .  FOO <> BAR", 
+                     new String[] { "foo", "bar", ".", "FOO", "<>", "BAR" });
+    assertAnalyzesTo(a, "foo.bar.FOO.BAR", 
+                     new String[] { "foo.bar.FOO.BAR" });
+    assertAnalyzesTo(a, "U.S.A.", 
+                     new String[] { "U.S.A." });
+    assertAnalyzesTo(a, "C++", 
+                     new String[] { "C++" });
+    assertAnalyzesTo(a, "B2B", 
+                     new String[] { "B2B" });
+    assertAnalyzesTo(a, "2B", 
+                     new String[] { "2B" });
+    assertAnalyzesTo(a, "\"QUOTED\" word", 
+                     new String[] { "\"QUOTED\"", "word" });
+  }
+
+  public void testStop() throws Exception {
+    Analyzer a = new StopAnalyzer(TEST_VERSION_CURRENT);
+    assertAnalyzesTo(a, "foo bar FOO BAR", 
+                     new String[] { "foo", "bar", "foo", "bar" });
+    assertAnalyzesTo(a, "foo a bar such FOO THESE BAR", 
+                     new String[] { "foo", "bar", "foo", "bar" });
+  }
+
+  void verifyPayload(TokenStream ts) throws IOException {
+    PayloadAttribute payloadAtt = ts.getAttribute(PayloadAttribute.class);
+    for(byte b=1;;b++) {
+      boolean hasNext = ts.incrementToken();
+      if (!hasNext) break;
+      // System.out.println("id="+System.identityHashCode(nextToken) + " " + t);
+      // System.out.println("payload=" + (int)nextToken.getPayload().toByteArray()[0]);
+      assertEquals(b, payloadAtt.getPayload().toByteArray()[0]);
+    }
+  }
+
+  // Make sure old style next() calls result in a new copy of payloads
+  public void testPayloadCopy() throws IOException {
+    String s = "how now brown cow";
+    TokenStream ts;
+    ts = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(s));
+    ts = new PayloadSetter(ts);
+    verifyPayload(ts);
+
+    ts = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(s));
+    ts = new PayloadSetter(ts);
+    verifyPayload(ts);
+  }
+
+  // LUCENE-1150: Just a compile time test, to ensure the
+  // StandardAnalyzer constants remain publicly accessible
+  @SuppressWarnings("unused")
+  public void _testStandardConstants() {
+    int x = StandardTokenizer.ALPHANUM;
+    x = StandardTokenizer.APOSTROPHE;
+    x = StandardTokenizer.ACRONYM;
+    x = StandardTokenizer.COMPANY;
+    x = StandardTokenizer.EMAIL;
+    x = StandardTokenizer.HOST;
+    x = StandardTokenizer.NUM;
+    x = StandardTokenizer.CJ;
+    String[] y = StandardTokenizer.TOKEN_TYPES;
+  }
+
+  private static class LowerCaseWhitespaceAnalyzer extends Analyzer {
+
+    @Override
+    public TokenStream tokenStream(String fieldName, Reader reader) {
+      return new LowerCaseFilter(TEST_VERSION_CURRENT,
+          new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader));
+    }
+    
+  }
+  
+  /**
+   * @deprecated remove this when lucene 3.0 "broken unicode 4" support
+   * is no longer needed.
+   */
+  @Deprecated
+  private static class LowerCaseWhitespaceAnalyzerBWComp extends Analyzer {
+
+    @Override
+    public TokenStream tokenStream(String fieldName, Reader reader) {
+      return new LowerCaseFilter(new WhitespaceTokenizer(reader));
+    }
+    
+  }
+  
+  /**
+   * Test that LowercaseFilter handles entire unicode range correctly
+   */
+  public void testLowerCaseFilter() throws IOException {
+    Analyzer a = new LowerCaseWhitespaceAnalyzer();
+    // BMP
+    assertAnalyzesTo(a, "AbaCaDabA", new String[] { "abacadaba" });
+    // supplementary
+    assertAnalyzesTo(a, "\ud801\udc16\ud801\udc16\ud801\udc16\ud801\udc16",
+        new String[] {"\ud801\udc3e\ud801\udc3e\ud801\udc3e\ud801\udc3e"});
+    assertAnalyzesTo(a, "AbaCa\ud801\udc16DabA", 
+        new String[] { "abaca\ud801\udc3edaba" });
+    // unpaired lead surrogate
+    assertAnalyzesTo(a, "AbaC\uD801AdaBa", 
+        new String [] { "abac\uD801adaba" });
+    // unpaired trail surrogate
+    assertAnalyzesTo(a, "AbaC\uDC16AdaBa", 
+        new String [] { "abac\uDC16adaba" });
+  }
+  
+  /**
+   * Test that LowercaseFilter handles the lowercasing correctly if the term
+   * buffer has a trailing surrogate character leftover and the current term in
+   * the buffer ends with a corresponding leading surrogate.
+   */
+  public void testLowerCaseFilterLowSurrogateLeftover() throws IOException {
+    // test if the limit of the termbuffer is correctly used with supplementary
+    // chars
+    WhitespaceTokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, 
+        new StringReader("BogustermBogusterm\udc16"));
+    LowerCaseFilter filter = new LowerCaseFilter(TEST_VERSION_CURRENT,
+        tokenizer);
+    assertTokenStreamContents(filter, new String[] {"bogustermbogusterm\udc16"});
+    filter.reset();
+    String highSurEndingUpper = "BogustermBoguster\ud801";
+    String highSurEndingLower = "bogustermboguster\ud801";
+    tokenizer.reset(new StringReader(highSurEndingUpper));
+    assertTokenStreamContents(filter, new String[] {highSurEndingLower});
+    assertTrue(filter.hasAttribute(CharTermAttribute.class));
+    char[] termBuffer = filter.getAttribute(CharTermAttribute.class).buffer();
+    int length = highSurEndingLower.length();
+    assertEquals('\ud801', termBuffer[length - 1]);
+    assertEquals('\udc3e', termBuffer[length]);
+    
+  }
+  
+  public void testLimitTokenCountAnalyzer() throws IOException {
+    Analyzer a = new LimitTokenCountAnalyzer(new WhitespaceAnalyzer(TEST_VERSION_CURRENT), 2);
+    // don't use assertAnalyzesTo here, as the end offset is not the end of the string!
+    assertTokenStreamContents(a.tokenStream("dummy", new StringReader("1  2     3  4  5")), new String[] { "1", "2" }, new int[] { 0, 3 }, new int[] { 1, 4 }, 4);
+    assertTokenStreamContents(a.reusableTokenStream("dummy", new StringReader("1 2 3 4 5")), new String[] { "1", "2" }, new int[] { 0, 2 }, new int[] { 1, 3 }, 3);
+    
+    a = new LimitTokenCountAnalyzer(new StandardAnalyzer(TEST_VERSION_CURRENT), 2);
+    // don't use assertAnalyzesTo here, as the end offset is not the end of the string!
+    assertTokenStreamContents(a.tokenStream("dummy", new StringReader("1 2 3 4 5")), new String[] { "1", "2" }, new int[] { 0, 2 }, new int[] { 1, 3 }, 3);
+    assertTokenStreamContents(a.reusableTokenStream("dummy", new StringReader("1 2 3 4 5")), new String[] { "1", "2" }, new int[] { 0, 2 }, new int[] { 1, 3 }, 3);
+  }
+  
+  /**
+   * Test that LowercaseFilter only works on BMP for back compat,
+   * depending upon version
+   * @deprecated remove this test when lucene 3.0 "broken unicode 4" support
+   * is no longer needed.
+   */
+  @Deprecated
+  public void testLowerCaseFilterBWComp() throws IOException {
+    Analyzer a = new LowerCaseWhitespaceAnalyzerBWComp();
+    // BMP
+    assertAnalyzesTo(a, "AbaCaDabA", new String[] { "abacadaba" });
+    // supplementary, no-op
+    assertAnalyzesTo(a, "\ud801\udc16\ud801\udc16\ud801\udc16\ud801\udc16",
+        new String[] {"\ud801\udc16\ud801\udc16\ud801\udc16\ud801\udc16"});
+    assertAnalyzesTo(a, "AbaCa\ud801\udc16DabA",
+        new String[] { "abaca\ud801\udc16daba" });
+    // unpaired lead surrogate
+    assertAnalyzesTo(a, "AbaC\uD801AdaBa", 
+        new String [] { "abac\uD801adaba" });
+    // unpaired trail surrogate
+    assertAnalyzesTo(a, "AbaC\uDC16AdaBa", 
+        new String [] { "abac\uDC16adaba" });
+  }
+
+  /** blast some random strings through the analyzer */
+  public void testRandomStrings() throws Exception {
+    checkRandomData(random, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
+    checkRandomData(random, new SimpleAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
+    checkRandomData(random, new StopAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
+  } 
+}
+
+final class PayloadSetter extends TokenFilter {
+  PayloadAttribute payloadAtt;
+  public  PayloadSetter(TokenStream input) {
+    super(input);
+    payloadAtt = addAttribute(PayloadAttribute.class);
+  }
+
+  byte[] data = new byte[1];
+  Payload p = new Payload(data,0,1);
+
+  @Override
+  public boolean incrementToken() throws IOException {
+    boolean hasNext = input.incrementToken();
+    if (!hasNext) return false;
+    payloadAtt.setPayload(p);  // reuse the payload / byte[]
+    data[0]++;
+    return true;
+  }
+}
\ No newline at end of file
diff --git a/lucene/backwards/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java b/lucene/backwards/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java
new file mode 100644
index 0000000..3a122cd
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java
@@ -0,0 +1,110 @@
+package org.apache.lucene.analysis;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+import java.io.IOException;
+
+import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.Field.TermVector;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.TermPositions;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.store.Directory;
+
+public class TestCachingTokenFilter extends BaseTokenStreamTestCase {
+  private String[] tokens = new String[] {"term1", "term2", "term3", "term2"};
+  
+  public void testCaching() throws IOException {
+    Directory dir = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random, dir);
+
+    Document doc = new Document();
+    TokenStream stream = new TokenStream() {
+      private int index = 0;
+      private CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
+      private OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
+      
+      @Override
+      public boolean incrementToken() throws IOException {
+        if (index == tokens.length) {
+          return false;
+        } else {
+          clearAttributes();
+          termAtt.append(tokens[index++]);
+          offsetAtt.setOffset(0,0);
+          return true;
+        }        
+      }
+      
+    };
+    
+    stream = new CachingTokenFilter(stream);
+    
+    doc.add(new Field("preanalyzed", stream, TermVector.NO));
+    
+    // 1) we consume all tokens twice before we add the doc to the index
+    checkTokens(stream);
+    stream.reset();  
+    checkTokens(stream);
+    
+    // 2) now add the document to the index and verify if all tokens are indexed
+    //    don't reset the stream here, the DocumentWriter should do that implicitly
+    writer.addDocument(doc);
+    
+    IndexReader reader = writer.getReader();
+    TermPositions termPositions = reader.termPositions(new Term("preanalyzed", "term1"));
+    assertTrue(termPositions.next());
+    assertEquals(1, termPositions.freq());
+    assertEquals(0, termPositions.nextPosition());
+
+    termPositions.seek(new Term("preanalyzed", "term2"));
+    assertTrue(termPositions.next());
+    assertEquals(2, termPositions.freq());
+    assertEquals(1, termPositions.nextPosition());
+    assertEquals(3, termPositions.nextPosition());
+    
+    termPositions.seek(new Term("preanalyzed", "term3"));
+    assertTrue(termPositions.next());
+    assertEquals(1, termPositions.freq());
+    assertEquals(2, termPositions.nextPosition());
+    reader.close();
+    writer.close();
+    // 3) reset stream and consume tokens again
+    stream.reset();
+    checkTokens(stream);
+    dir.close();
+  }
+  
+  private void checkTokens(TokenStream stream) throws IOException {
+    int count = 0;
+    
+    CharTermAttribute termAtt = stream.getAttribute(CharTermAttribute.class);
+    while (stream.incrementToken()) {
+      assertTrue(count < tokens.length);
+      assertEquals(tokens[count], termAtt.toString());
+      count++;
+    }
+    
+    assertEquals(tokens.length, count);
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/analysis/TestCharArrayMap.java b/lucene/backwards/src/test/org/apache/lucene/analysis/TestCharArrayMap.java
new file mode 100644
index 0000000..c18830d
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/analysis/TestCharArrayMap.java
@@ -0,0 +1,248 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.lucene.analysis;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Locale;
+import java.util.Map;
+
+import org.apache.lucene.util.LuceneTestCase;
+
+public class TestCharArrayMap extends LuceneTestCase {
+
+  public void doRandom(int iter, boolean ignoreCase) {
+    CharArrayMap<Integer> map = new CharArrayMap<Integer>(TEST_VERSION_CURRENT, 1, ignoreCase);
+    HashMap<String,Integer> hmap = new HashMap<String,Integer>();
+
+    char[] key;
+    for (int i=0; i<iter; i++) {
+      int len = random.nextInt(5);
+      key = new char[len];
+      for (int j=0; j<key.length; j++) {
+        key[j] = (char)random.nextInt(127);
+      }
+      String keyStr = new String(key);
+      String hmapKey = ignoreCase ? keyStr.toLowerCase(Locale.ENGLISH) : keyStr; 
+
+      int val = random.nextInt();
+
+      Object o1 = map.put(key, val);
+      Object o2 = hmap.put(hmapKey,val);
+      assertEquals(o1,o2);
+
+      // add it again with the string method
+      assertEquals(val, map.put(keyStr,val).intValue());
+
+      assertEquals(val, map.get(key,0,key.length).intValue());
+      assertEquals(val, map.get(key).intValue());
+      assertEquals(val, map.get(keyStr).intValue());
+
+      assertEquals(hmap.size(), map.size());
+    }
+  }
+
+  public void testCharArrayMap() {
+    int num = 5 * RANDOM_MULTIPLIER;
+    for (int i = 0; i < num; i++) { // pump this up for more random testing
+      doRandom(1000,false);
+      doRandom(1000,true);      
+    }
+  }
+
+  public void testMethods() {
+    CharArrayMap<Integer> cm = new CharArrayMap<Integer>(TEST_VERSION_CURRENT, 2, false);
+    HashMap<String,Integer> hm = new HashMap<String,Integer>();
+    hm.put("foo",1);
+    hm.put("bar",2);
+    cm.putAll(hm);
+    assertEquals(hm.size(), cm.size());
+    hm.put("baz", 3);
+    cm.putAll(hm);
+    assertEquals(hm.size(), cm.size());
+
+    CharArraySet cs = cm.keySet();
+    int n=0;
+    for (Object o : cs) {
+      assertTrue(cm.containsKey(o));
+      char[] co = (char[]) o;
+      assertTrue(cm.containsKey(co, 0, co.length));
+      n++;
+    }
+    assertEquals(hm.size(), n);
+    assertEquals(hm.size(), cs.size());
+    assertEquals(cm.size(), cs.size());
+    cs.clear();
+    assertEquals(0, cs.size());
+    assertEquals(0, cm.size());
+    try {
+      cs.add("test");
+      fail("keySet() allows adding new keys");
+    } catch (UnsupportedOperationException ue) {
+      // pass
+    }
+    cm.putAll(hm);
+    assertEquals(hm.size(), cs.size());
+    assertEquals(cm.size(), cs.size());
+
+    Iterator<Map.Entry<Object,Integer>> iter1 = cm.entrySet().iterator();
+    n=0;
+    while (iter1.hasNext()) {
+      Map.Entry<Object,Integer> entry = iter1.next();
+      Object key = entry.getKey();
+      Integer val = entry.getValue();
+      assertEquals(cm.get(key), val);
+      entry.setValue(val*100);
+      assertEquals(val*100, (int)cm.get(key));
+      n++;
+    }
+    assertEquals(hm.size(), n);
+    cm.clear();
+    cm.putAll(hm);
+    assertEquals(cm.size(), n);
+
+    CharArrayMap<Integer>.EntryIterator iter2 = cm.entrySet().iterator();
+    n=0;
+    while (iter2.hasNext()) {
+      char[] keyc = iter2.nextKey();
+      Integer val = iter2.currentValue();
+      assertEquals(hm.get(new String(keyc)), val);
+      iter2.setValue(val*100);
+      assertEquals(val*100, (int)cm.get(keyc));
+      n++;
+    }
+    assertEquals(hm.size(), n);
+
+    cm.entrySet().clear();
+    assertEquals(0, cm.size());
+    assertEquals(0, cm.entrySet().size());
+    assertTrue(cm.isEmpty());
+  }
+
+  public void testModifyOnUnmodifiable(){
+    CharArrayMap<Integer> map = new CharArrayMap<Integer>(TEST_VERSION_CURRENT, 2, false);
+    map.put("foo",1);
+    map.put("bar",2);
+    final int size = map.size();
+    assertEquals(2, size);
+    assertTrue(map.containsKey("foo"));  
+    assertEquals(1, map.get("foo").intValue());  
+    assertTrue(map.containsKey("bar"));  
+    assertEquals(2, map.get("bar").intValue());  
+
+    map = CharArrayMap.unmodifiableMap(map);
+    assertEquals("Map size changed due to unmodifiableMap call" , size, map.size());
+    String NOT_IN_MAP = "SirGallahad";
+    assertFalse("Test String already exists in map", map.containsKey(NOT_IN_MAP));
+    assertNull("Test String already exists in map", map.get(NOT_IN_MAP));
+    
+    try{
+      map.put(NOT_IN_MAP.toCharArray(), 3);  
+      fail("Modified unmodifiable map");
+    }catch (UnsupportedOperationException e) {
+      // expected
+      assertFalse("Test String has been added to unmodifiable map", map.containsKey(NOT_IN_MAP));
+      assertNull("Test String has been added to unmodifiable map", map.get(NOT_IN_MAP));
+      assertEquals("Size of unmodifiable map has changed", size, map.size());
+    }
+    
+    try{
+      map.put(NOT_IN_MAP, 3);  
+      fail("Modified unmodifiable map");
+    }catch (UnsupportedOperationException e) {
+      // expected
+      assertFalse("Test String has been added to unmodifiable map", map.containsKey(NOT_IN_MAP));
+      assertNull("Test String has been added to unmodifiable map", map.get(NOT_IN_MAP));
+      assertEquals("Size of unmodifiable map has changed", size, map.size());
+    }
+    
+    try{
+      map.put(new StringBuilder(NOT_IN_MAP), 3);  
+      fail("Modified unmodifiable map");
+    }catch (UnsupportedOperationException e) {
+      // expected
+      assertFalse("Test String has been added to unmodifiable map", map.containsKey(NOT_IN_MAP));
+      assertNull("Test String has been added to unmodifiable map", map.get(NOT_IN_MAP));
+      assertEquals("Size of unmodifiable map has changed", size, map.size());
+    }
+    
+    try{
+      map.clear();  
+      fail("Modified unmodifiable map");
+    }catch (UnsupportedOperationException e) {
+      // expected
+      assertEquals("Size of unmodifiable map has changed", size, map.size());
+    }
+    
+    try{
+      map.entrySet().clear();  
+      fail("Modified unmodifiable map");
+    }catch (UnsupportedOperationException e) {
+      // expected
+      assertEquals("Size of unmodifiable map has changed", size, map.size());
+    }
+    
+    try{
+      map.keySet().clear();  
+      fail("Modified unmodifiable map");
+    }catch (UnsupportedOperationException e) {
+      // expected
+      assertEquals("Size of unmodifiable map has changed", size, map.size());
+    }
+    
+    try{
+      map.put((Object) NOT_IN_MAP, 3);  
+      fail("Modified unmodifiable map");
+    }catch (UnsupportedOperationException e) {
+      // expected
+      assertFalse("Test String has been added to unmodifiable map", map.containsKey(NOT_IN_MAP));
+      assertNull("Test String has been added to unmodifiable map", map.get(NOT_IN_MAP));
+      assertEquals("Size of unmodifiable map has changed", size, map.size());
+    }
+    
+    try{
+      map.putAll(Collections.singletonMap(NOT_IN_MAP, 3));  
+      fail("Modified unmodifiable map");
+    }catch (UnsupportedOperationException e) {
+      // expected
+      assertFalse("Test String has been added to unmodifiable map", map.containsKey(NOT_IN_MAP));
+      assertNull("Test String has been added to unmodifiable map", map.get(NOT_IN_MAP));
+      assertEquals("Size of unmodifiable map has changed", size, map.size());
+    }
+    
+    assertTrue(map.containsKey("foo"));  
+    assertEquals(1, map.get("foo").intValue());  
+    assertTrue(map.containsKey("bar"));  
+    assertEquals(2, map.get("bar").intValue());  
+  }
+  
+  public void testToString() {
+    CharArrayMap<Integer> cm = new CharArrayMap<Integer>(TEST_VERSION_CURRENT, Collections.singletonMap("test",1), false);
+    assertEquals("[test]",cm.keySet().toString());
+    assertEquals("[1]",cm.values().toString());
+    assertEquals("[test=1]",cm.entrySet().toString());
+    assertEquals("{test=1}",cm.toString());
+    cm.put("test2", 2);
+    assertTrue(cm.keySet().toString().contains(", "));
+    assertTrue(cm.values().toString().contains(", "));
+    assertTrue(cm.entrySet().toString().contains(", "));
+    assertTrue(cm.toString().contains(", "));
+  }
+}
+
diff --git a/lucene/backwards/src/test/org/apache/lucene/analysis/TestCharArraySet.java b/lucene/backwards/src/test/org/apache/lucene/analysis/TestCharArraySet.java
new file mode 100755
index 0000000..cbeaf74
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/analysis/TestCharArraySet.java
@@ -0,0 +1,541 @@
+package org.apache.lucene.analysis;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.Iterator;
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.Version;
+
+
+public class TestCharArraySet extends LuceneTestCase {
+  
+  static final String[] TEST_STOP_WORDS = {
+    "a", "an", "and", "are", "as", "at", "be", "but", "by",
+    "for", "if", "in", "into", "is", "it",
+    "no", "not", "of", "on", "or", "such",
+    "that", "the", "their", "then", "there", "these",
+    "they", "this", "to", "was", "will", "with"
+  };
+  
+  
+  public void testRehash() throws Exception {
+    CharArraySet cas = new CharArraySet(TEST_VERSION_CURRENT, 0, true);
+    for(int i=0;i<TEST_STOP_WORDS.length;i++)
+      cas.add(TEST_STOP_WORDS[i]);
+    assertEquals(TEST_STOP_WORDS.length, cas.size());
+    for(int i=0;i<TEST_STOP_WORDS.length;i++)
+      assertTrue(cas.contains(TEST_STOP_WORDS[i]));
+  }
+
+  public void testNonZeroOffset() {
+    String[] words={"Hello","World","this","is","a","test"};
+    char[] findme="xthisy".toCharArray();   
+    CharArraySet set=new CharArraySet(TEST_VERSION_CURRENT, 10,true);
+    set.addAll(Arrays.asList(words));
+    assertTrue(set.contains(findme, 1, 4));
+    assertTrue(set.contains(new String(findme,1,4)));
+    
+    // test unmodifiable
+    set = CharArraySet.unmodifiableSet(set);
+    assertTrue(set.contains(findme, 1, 4));
+    assertTrue(set.contains(new String(findme,1,4)));
+  }
+  
+  public void testObjectContains() {
+    CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, 10, true);
+    Integer val = Integer.valueOf(1);
+    set.add(val);
+    assertTrue(set.contains(val));
+    assertTrue(set.contains(new Integer(1))); // another integer
+    assertTrue(set.contains("1"));
+    assertTrue(set.contains(new char[]{'1'}));
+    // test unmodifiable
+    set = CharArraySet.unmodifiableSet(set);
+    assertTrue(set.contains(val));
+    assertTrue(set.contains(new Integer(1))); // another integer
+    assertTrue(set.contains("1"));
+    assertTrue(set.contains(new char[]{'1'}));
+  }
+  
+  public void testClear(){
+    CharArraySet set=new CharArraySet(TEST_VERSION_CURRENT, 10,true);
+    set.addAll(Arrays.asList(TEST_STOP_WORDS));
+    assertEquals("Not all words added", TEST_STOP_WORDS.length, set.size());
+    set.clear();
+    assertEquals("not empty", 0, set.size());
+    for(int i=0;i<TEST_STOP_WORDS.length;i++)
+      assertFalse(set.contains(TEST_STOP_WORDS[i]));
+    set.addAll(Arrays.asList(TEST_STOP_WORDS));
+    assertEquals("Not all words added", TEST_STOP_WORDS.length, set.size());
+    for(int i=0;i<TEST_STOP_WORDS.length;i++)
+      assertTrue(set.contains(TEST_STOP_WORDS[i]));
+  }
+  
+  public void testModifyOnUnmodifiable(){
+    CharArraySet set=new CharArraySet(TEST_VERSION_CURRENT, 10, true);
+    set.addAll(Arrays.asList(TEST_STOP_WORDS));
+    final int size = set.size();
+    set = CharArraySet.unmodifiableSet(set);
+    assertEquals("Set size changed due to unmodifiableSet call" , size, set.size());
+    String NOT_IN_SET = "SirGallahad";
+    assertFalse("Test String already exists in set", set.contains(NOT_IN_SET));
+    
+    try{
+      set.add(NOT_IN_SET.toCharArray());  
+      fail("Modified unmodifiable set");
+    }catch (UnsupportedOperationException e) {
+      // expected
+      assertFalse("Test String has been added to unmodifiable set", set.contains(NOT_IN_SET));
+      assertEquals("Size of unmodifiable set has changed", size, set.size());
+    }
+    
+    try{
+      set.add(NOT_IN_SET);  
+      fail("Modified unmodifiable set");
+    }catch (UnsupportedOperationException e) {
+      // expected
+      assertFalse("Test String has been added to unmodifiable set", set.contains(NOT_IN_SET));
+      assertEquals("Size of unmodifiable set has changed", size, set.size());
+    }
+    
+    try{
+      set.add(new StringBuilder(NOT_IN_SET));  
+      fail("Modified unmodifiable set");
+    }catch (UnsupportedOperationException e) {
+      // expected
+      assertFalse("Test String has been added to unmodifiable set", set.contains(NOT_IN_SET));
+      assertEquals("Size of unmodifiable set has changed", size, set.size());
+    }
+    
+    try{
+      set.clear();  
+      fail("Modified unmodifiable set");
+    }catch (UnsupportedOperationException e) {
+      // expected
+      assertFalse("Changed unmodifiable set", set.contains(NOT_IN_SET));
+      assertEquals("Size of unmodifiable set has changed", size, set.size());
+    }
+    try{
+      set.add((Object) NOT_IN_SET);  
+      fail("Modified unmodifiable set");
+    }catch (UnsupportedOperationException e) {
+      // expected
+      assertFalse("Test String has been added to unmodifiable set", set.contains(NOT_IN_SET));
+      assertEquals("Size of unmodifiable set has changed", size, set.size());
+    }
+    
+    // This test was changed in 3.1, as a contains() call on the given Collection using the "correct" iterator's
+    // current key (now a char[]) on a Set&lt;String&gt; would not hit any element of the CAS and therefore never call
+    // remove() on the iterator
+    try{
+      set.removeAll(new CharArraySet(TEST_VERSION_CURRENT, Arrays.asList(TEST_STOP_WORDS), true));  
+      fail("Modified unmodifiable set");
+    }catch (UnsupportedOperationException e) {
+      // expected
+      assertEquals("Size of unmodifiable set has changed", size, set.size());
+    }
+    
+    try{
+      set.retainAll(new CharArraySet(TEST_VERSION_CURRENT, Arrays.asList(NOT_IN_SET), true));  
+      fail("Modified unmodifiable set");
+    }catch (UnsupportedOperationException e) {
+      // expected
+      assertEquals("Size of unmodifiable set has changed", size, set.size());
+    }
+    
+    try{
+      set.addAll(Arrays.asList(new String[]{NOT_IN_SET}));  
+      fail("Modified unmodifiable set");
+    }catch (UnsupportedOperationException e) {
+      // expected
+      assertFalse("Test String has been added to unmodifiable set", set.contains(NOT_IN_SET));
+    }
+    
+    for (int i = 0; i < TEST_STOP_WORDS.length; i++) {
+      assertTrue(set.contains(TEST_STOP_WORDS[i]));  
+    }
+  }
+  
+  public void testUnmodifiableSet(){
+    CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, 10,true);
+    set.addAll(Arrays.asList(TEST_STOP_WORDS));
+    set.add(Integer.valueOf(1));
+    final int size = set.size();
+    set = CharArraySet.unmodifiableSet(set);
+    assertEquals("Set size changed due to unmodifiableSet call" , size, set.size());
+    for (String stopword : TEST_STOP_WORDS) {
+      assertTrue(set.contains(stopword));
+    }
+    assertTrue(set.contains(Integer.valueOf(1)));
+    assertTrue(set.contains("1"));
+    assertTrue(set.contains(new char[]{'1'}));
+    
+    try{
+      CharArraySet.unmodifiableSet(null);
+      fail("can not make null unmodifiable");
+    }catch (NullPointerException e) {
+      // expected
+    }
+  }
+  
+  public void testSupplementaryChars() {
+    String missing = "Term %s is missing in the set";
+    String falsePos = "Term %s is in the set but shouldn't";
+    // for reference see
+    // http://unicode.org/cldr/utility/list-unicodeset.jsp?a=[[%3ACase_Sensitive%3DTrue%3A]%26[^[\u0000-\uFFFF]]]&esc=on
+    String[] upperArr = new String[] {"Abc\ud801\udc1c",
+        "\ud801\udc1c\ud801\udc1cCDE", "A\ud801\udc1cB"};
+    String[] lowerArr = new String[] {"abc\ud801\udc44",
+        "\ud801\udc44\ud801\udc44cde", "a\ud801\udc44b"};
+    CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, Arrays.asList(TEST_STOP_WORDS), true);
+    for (String upper : upperArr) {
+      set.add(upper);
+    }
+    for (int i = 0; i < upperArr.length; i++) {
+      assertTrue(String.format(missing, upperArr[i]), set.contains(upperArr[i]));
+      assertTrue(String.format(missing, lowerArr[i]), set.contains(lowerArr[i]));
+    }
+    set = new CharArraySet(TEST_VERSION_CURRENT, Arrays.asList(TEST_STOP_WORDS), false);
+    for (String upper : upperArr) {
+      set.add(upper);
+    }
+    for (int i = 0; i < upperArr.length; i++) {
+      assertTrue(String.format(missing, upperArr[i]), set.contains(upperArr[i]));
+      assertFalse(String.format(falsePos, lowerArr[i]), set.contains(lowerArr[i]));
+    }
+  }
+  
+  public void testSingleHighSurrogate() {
+    String missing = "Term %s is missing in the set";
+    String falsePos = "Term %s is in the set but shouldn't";
+    String[] upperArr = new String[] { "ABC\uD800", "ABC\uD800EfG",
+        "\uD800EfG", "\uD800\ud801\udc1cB" };
+
+    String[] lowerArr = new String[] { "abc\uD800", "abc\uD800efg",
+        "\uD800efg", "\uD800\ud801\udc44b" };
+    CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, Arrays
+        .asList(TEST_STOP_WORDS), true);
+    for (String upper : upperArr) {
+      set.add(upper);
+    }
+    for (int i = 0; i < upperArr.length; i++) {
+      assertTrue(String.format(missing, upperArr[i]), set.contains(upperArr[i]));
+      assertTrue(String.format(missing, lowerArr[i]), set.contains(lowerArr[i]));
+    }
+    set = new CharArraySet(TEST_VERSION_CURRENT, Arrays.asList(TEST_STOP_WORDS),
+        false);
+    for (String upper : upperArr) {
+      set.add(upper);
+    }
+    for (int i = 0; i < upperArr.length; i++) {
+      assertTrue(String.format(missing, upperArr[i]), set.contains(upperArr[i]));
+      assertFalse(String.format(falsePos, upperArr[i]), set
+          .contains(lowerArr[i]));
+    }
+  }
+  
+  /**
+   * @deprecated remove this test when lucene 3.0 "broken unicode 4" support is
+   *             no longer needed.
+   */
+  @Deprecated
+  public void testSupplementaryCharsBWCompat() {
+    String missing = "Term %s is missing in the set";
+    String falsePos = "Term %s is in the set but shouldn't be";
+    // for reference see
+    // http://unicode.org/cldr/utility/list-unicodeset.jsp?a=[[%3ACase_Sensitive%3DTrue%3A]%26[^[\u0000-\uFFFF]]]&esc=on
+    String[] upperArr = new String[] {"Abc\ud801\udc1c",
+        "\ud801\udc1c\ud801\udc1cCDE", "A\ud801\udc1cB"};
+    String[] lowerArr = new String[] {"abc\ud801\udc44",
+        "\ud801\udc44\ud801\udc44cde", "a\ud801\udc44b"};
+    CharArraySet set = new CharArraySet(Version.LUCENE_30, Arrays.asList(TEST_STOP_WORDS), true);
+    for (String upper : upperArr) {
+      set.add(upper);
+    }
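+    // with Version.LUCENE_30 lowercasing works per UTF-16 code unit, so supplementary characters are never case-folded
+    // and the lowercase variants must not be found, even though ignoreCase is true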
+    for (int i = 0; i < upperArr.length; i++) {
+      assertTrue(String.format(missing, upperArr[i]), set.contains(upperArr[i]));
+      assertFalse(String.format(falsePos, lowerArr[i]), set.contains(lowerArr[i]));
+    }
+    set = new CharArraySet(Version.LUCENE_30, Arrays.asList(TEST_STOP_WORDS), false);
+    for (String upper : upperArr) {
+      set.add(upper);
+    }
+    for (int i = 0; i < upperArr.length; i++) {
+      assertTrue(String.format(missing, upperArr[i]), set.contains(upperArr[i]));
+      assertFalse(String.format(falsePos, lowerArr[i]), set.contains(lowerArr[i]));
+    }
+  }
+
+  /**
+   * @deprecated remove this test when lucene 3.0 "broken unicode 4" support is
+   *             no longer needed.
+   */
+  @Deprecated
+  public void testSingleHighSurrogateBWCompat() {
+    String missing = "Term %s is missing in the set";
+    String falsePos = "Term %s is in the set but shouldn't be";
+    String[] upperArr = new String[] { "ABC\uD800", "ABC\uD800EfG",
+        "\uD800EfG", "\uD800\ud801\udc1cB" };
+
+    String[] lowerArr = new String[] { "abc\uD800", "abc\uD800efg",
+        "\uD800efg", "\uD800\ud801\udc44b" };
+    CharArraySet set = new CharArraySet(Version.LUCENE_30, Arrays
+        .asList(TEST_STOP_WORDS), true);
+    for (String upper : upperArr) {
+      set.add(upper);
+    }
+    for (int i = 0; i < upperArr.length; i++) {
+      assertTrue(String.format(missing, upperArr[i]), set.contains(upperArr[i]));
+      if (i == lowerArr.length - 1)
+        assertFalse(String.format(falsePos, lowerArr[i]), set
+            .contains(lowerArr[i]));
+      else
+        assertTrue(String.format(missing, lowerArr[i]), set
+            .contains(lowerArr[i]));
+    }
+    set = new CharArraySet(Version.LUCENE_30, Arrays.asList(TEST_STOP_WORDS),
+        false);
+    for (String upper : upperArr) {
+      set.add(upper);
+    }
+    for (int i = 0; i < upperArr.length; i++) {
+      assertTrue(String.format(missing, upperArr[i]), set.contains(upperArr[i]));
+      assertFalse(String.format(falsePos, lowerArr[i]), set
+          .contains(lowerArr[i]));
+    }
+  }
+  
+  @SuppressWarnings("deprecation")
+  public void testCopyCharArraySetBWCompat() {
+    CharArraySet setIngoreCase = new CharArraySet(TEST_VERSION_CURRENT, 10, true);
+    CharArraySet setCaseSensitive = new CharArraySet(TEST_VERSION_CURRENT, 10, false);
+
+    List<String> stopwords = Arrays.asList(TEST_STOP_WORDS);
+    List<String> stopwordsUpper = new ArrayList<String>();
+    for (String string : stopwords) {
+      stopwordsUpper.add(string.toUpperCase());
+    }
+    setIngoreCase.addAll(Arrays.asList(TEST_STOP_WORDS));
+    setIngoreCase.add(Integer.valueOf(1));
+    setCaseSensitive.addAll(Arrays.asList(TEST_STOP_WORDS));
+    setCaseSensitive.add(Integer.valueOf(1));
+
+    // This should use the deprecated methods, because it checks backwards compatibility.
+    CharArraySet copy = CharArraySet.copy(setIngoreCase);
+    CharArraySet copyCaseSens = CharArraySet.copy(setCaseSensitive);
+
+    assertEquals(setIngoreCase.size(), copy.size());
+    assertEquals(setCaseSensitive.size(), copyCaseSens.size());
+
+    assertTrue(copy.containsAll(stopwords));
+    assertTrue(copy.containsAll(stopwordsUpper));
+    assertTrue(copyCaseSens.containsAll(stopwords));
+    for (String string : stopwordsUpper) {
+      assertFalse(copyCaseSens.contains(string));
+    }
+    // test adding terms to the copy
+    List<String> newWords = new ArrayList<String>();
+    for (String string : stopwords) {
+      newWords.add(string+"_1");
+    }
+    copy.addAll(newWords);
+    
+    assertTrue(copy.containsAll(stopwords));
+    assertTrue(copy.containsAll(stopwordsUpper));
+    assertTrue(copy.containsAll(newWords));
+    // newly added terms are not in the source set
+    for (String string : newWords) {
+      assertFalse(setIngoreCase.contains(string));  
+      assertFalse(setCaseSensitive.contains(string));  
+
+    }
+  }
+  
+  /**
+   * Test the static #copy() function with a CharArraySet as a source
+   */
+  public void testCopyCharArraySet() {
+    CharArraySet setIngoreCase = new CharArraySet(TEST_VERSION_CURRENT, 10, true);
+    CharArraySet setCaseSensitive = new CharArraySet(TEST_VERSION_CURRENT, 10, false);
+
+    List<String> stopwords = Arrays.asList(TEST_STOP_WORDS);
+    List<String> stopwordsUpper = new ArrayList<String>();
+    for (String string : stopwords) {
+      stopwordsUpper.add(string.toUpperCase());
+    }
+    setIngoreCase.addAll(Arrays.asList(TEST_STOP_WORDS));
+    setIngoreCase.add(Integer.valueOf(1));
+    setCaseSensitive.addAll(Arrays.asList(TEST_STOP_WORDS));
+    setCaseSensitive.add(Integer.valueOf(1));
+
+    CharArraySet copy = CharArraySet.copy(TEST_VERSION_CURRENT, setIngoreCase);
+    CharArraySet copyCaseSens = CharArraySet.copy(TEST_VERSION_CURRENT, setCaseSensitive);
+
+    assertEquals(setIngoreCase.size(), copy.size());
+    assertEquals(setCaseSensitive.size(), copyCaseSens.size());
+
+    assertTrue(copy.containsAll(stopwords));
+    assertTrue(copy.containsAll(stopwordsUpper));
+    assertTrue(copyCaseSens.containsAll(stopwords));
+    for (String string : stopwordsUpper) {
+      assertFalse(copyCaseSens.contains(string));
+    }
+    // test adding terms to the copy
+    List<String> newWords = new ArrayList<String>();
+    for (String string : stopwords) {
+      newWords.add(string+"_1");
+    }
+    copy.addAll(newWords);
+    
+    assertTrue(copy.containsAll(stopwords));
+    assertTrue(copy.containsAll(stopwordsUpper));
+    assertTrue(copy.containsAll(newWords));
+    // newly added terms are not in the source set
+    for (String string : newWords) {
+      assertFalse(setIngoreCase.contains(string));  
+      assertFalse(setCaseSensitive.contains(string));  
+
+    }
+  }
+  
+  /**
+   * Test the static #copy() function with a JDK {@link Set} as a source
+   */
+  public void testCopyJDKSet() {
+    Set<String> set = new HashSet<String>();
+
+    List<String> stopwords = Arrays.asList(TEST_STOP_WORDS);
+    List<String> stopwordsUpper = new ArrayList<String>();
+    for (String string : stopwords) {
+      stopwordsUpper.add(string.toUpperCase());
+    }
+    set.addAll(Arrays.asList(TEST_STOP_WORDS));
+
+    CharArraySet copy = CharArraySet.copy(TEST_VERSION_CURRENT, set);
+
+    assertEquals(set.size(), copy.size());
+    assertEquals(set.size(), copy.size());
+
+    assertTrue(copy.containsAll(stopwords));
+    for (String string : stopwordsUpper) {
+      assertFalse(copy.contains(string));
+    }
+    
+    List<String> newWords = new ArrayList<String>();
+    for (String string : stopwords) {
+      newWords.add(string+"_1");
+    }
+    copy.addAll(newWords);
+    
+    assertTrue(copy.containsAll(stopwords));
+    assertTrue(copy.containsAll(newWords));
+    // newly added terms are not in the source set
+    for (String string : newWords) {
+      assertFalse(set.contains(string));  
+    }
+  }
+  
+  /**
+   * Tests a special case of {@link CharArraySet#copy(Version, Set)} where the
+   * set to copy is the {@link CharArraySet#EMPTY_SET}
+   */
+  public void testCopyEmptySet() {
+    assertSame(CharArraySet.EMPTY_SET, 
+        CharArraySet.copy(TEST_VERSION_CURRENT, CharArraySet.EMPTY_SET));
+  }
+
+  /**
+   * Smoketests the static empty set
+   */
+  public void testEmptySet() {
+    assertEquals(0, CharArraySet.EMPTY_SET.size());
+    
+    assertTrue(CharArraySet.EMPTY_SET.isEmpty());
+    for (String stopword : TEST_STOP_WORDS) {
+      assertFalse(CharArraySet.EMPTY_SET.contains(stopword));
+    }
+    assertFalse(CharArraySet.EMPTY_SET.contains("foo"));
+    assertFalse(CharArraySet.EMPTY_SET.contains((Object) "foo"));
+    assertFalse(CharArraySet.EMPTY_SET.contains("foo".toCharArray()));
+    assertFalse(CharArraySet.EMPTY_SET.contains("foo".toCharArray(),0,3));
+  }
+  
+  /**
+   * Test for NPE
+   */
+  public void testContainsWithNull() {
+    CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, 1, true);
+    try {
+      set.contains((char[]) null, 0, 10);
+      fail("null value must raise NPE");
+    } catch (NullPointerException e) {}
+    try {
+      set.contains((CharSequence) null);
+      fail("null value must raise NPE");
+    } catch (NullPointerException e) {}
+    try {
+      set.contains((Object) null);
+      fail("null value must raise NPE");
+    } catch (NullPointerException e) {}
+  }
+  
+  @Deprecated @SuppressWarnings("unchecked")
+  public void testIterator() {
+    HashSet<String> hset = new HashSet<String>();
+    hset.addAll(Arrays.asList(TEST_STOP_WORDS));
+
+    assertTrue("in 3.0 version, iterator should be CharArraySetIterator",
+      ((Iterator) CharArraySet.copy(Version.LUCENE_30, hset).iterator()) instanceof CharArraySet.CharArraySetIterator);
+
+    CharArraySet set = CharArraySet.copy(TEST_VERSION_CURRENT, hset);
+    assertFalse("in current version, iterator should not be CharArraySetIterator",
+      ((Iterator) set.iterator()) instanceof CharArraySet.CharArraySetIterator);
+    
+    Iterator<String> it = set.stringIterator();
+    assertTrue(it instanceof CharArraySet.CharArraySetIterator);
+    while (it.hasNext()) {
+      // as the set returns String instances, this must work:
+      assertTrue(hset.contains(it.next()));
+      try {
+        it.remove();
+        fail("remove() should not work on CharArraySetIterator");
+      } catch (UnsupportedOperationException uoe) {
+        // pass
+      }
+    }
+  }
+  
+  public void testToString() {
+    CharArraySet set = CharArraySet.copy(TEST_VERSION_CURRENT, Collections.singleton("test"));
+    assertEquals("[test]", set.toString());
+    set.add("test2");
+    assertTrue(set.toString().contains(", "));
+    
+    set = CharArraySet.copy(Version.LUCENE_30, Collections.singleton("test"));
+    assertEquals("[test]", set.toString());
+    set.add("test2");
+    assertTrue(set.toString().contains(", "));
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/analysis/TestCharFilter.java b/lucene/backwards/src/test/org/apache/lucene/analysis/TestCharFilter.java
new file mode 100644
index 0000000..1d2394f
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/analysis/TestCharFilter.java
@@ -0,0 +1,69 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.lucene.analysis;
+
+import java.io.StringReader;
+
+import org.apache.lucene.util.LuceneTestCase;
+
+public class TestCharFilter extends LuceneTestCase {
+
+  public void testCharFilter1() throws Exception {
+    CharStream cs = new CharFilter1( CharReader.get( new StringReader("") ) );
+    assertEquals( "corrected offset is invalid", 1, cs.correctOffset( 0 ) );
+  }
+
+  public void testCharFilter2() throws Exception {
+    CharStream cs = new CharFilter2( CharReader.get( new StringReader("") ) );
+    assertEquals( "corrected offset is invalid", 2, cs.correctOffset( 0 ) );
+  }
+
+  public void testCharFilter12() throws Exception {
+    CharStream cs = new CharFilter2( new CharFilter1( CharReader.get( new StringReader("") ) ) );
+    assertEquals( "corrected offset is invalid", 3, cs.correctOffset( 0 ) );
+  }
+
+  public void testCharFilter11() throws Exception {
+    CharStream cs = new CharFilter1( new CharFilter1( CharReader.get( new StringReader("") ) ) );
+    assertEquals( "corrected offset is invalid", 2, cs.correctOffset( 0 ) );
+  }
+
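+  // Each stub filter below shifts corrected offsets by a fixed amount, so chaining filters accumulates the corrections (e.g. 2 + 1 = 3 in testCharFilter12).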
+  static class CharFilter1 extends CharFilter {
+
+    protected CharFilter1(CharStream in) {
+      super(in);
+    }
+
+    @Override
+    protected int correct(int currentOff) {
+      return currentOff + 1;
+    }
+  }
+
+  static class CharFilter2 extends CharFilter {
+
+    protected CharFilter2(CharStream in) {
+      super(in);
+    }
+
+    @Override
+    protected int correct(int currentOff) {
+      return currentOff + 2;
+    }
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/analysis/TestCharTokenizers.java b/lucene/backwards/src/test/org/apache/lucene/analysis/TestCharTokenizers.java
new file mode 100644
index 0000000..ff6f961
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/analysis/TestCharTokenizers.java
@@ -0,0 +1,222 @@
+package org.apache.lucene.analysis;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.io.Reader;
+import java.io.StringReader;
+
+import org.apache.lucene.util.Version;
+
+/**
+ * Testcase for {@link CharTokenizer} subclasses
+ */
+public class TestCharTokenizers extends BaseTokenStreamTestCase {
+
+  /*
+   * test to read surrogate pairs without losing the pairing
+   * if the surrogate pair is at the border of the internal IO buffer
+   */
+  public void testReadSupplementaryChars() throws IOException {
+    StringBuilder builder = new StringBuilder();
+    // create random input
+    int num = 1024 + random.nextInt(1024);
+    num *= RANDOM_MULTIPLIER;
+    for (int i = 1; i < num; i++) {
+      builder.append("\ud801\udc1cabc");
+      if((i % 10) == 0)
+        builder.append(" ");
+    }
+    // internal buffer size is 1024 make sure we have a surrogate pair right at the border
+    builder.insert(1023, "\ud801\udc1c");
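+    // the inserted high surrogate lands at index 1023 and its low surrogate at index 1024, straddling the buffer boundary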
+    LowerCaseTokenizer tokenizer = new LowerCaseTokenizer(
+        TEST_VERSION_CURRENT, new StringReader(builder.toString()));
+    assertTokenStreamContents(tokenizer, builder.toString().toLowerCase().split(" "));
+  }
+  
+  /*
+   * test extending the TermAttribute char buffer internally. If the internal
+   * algorithm that grows the char array only grows it by one char and the
+   * next char to be filled in is a supplementary codepoint (using 2 chars), an
+   * index out of bounds exception is triggered.
+   */
+  public void testExtendCharBuffer() throws IOException {
+    for (int i = 0; i < 40; i++) {
+      StringBuilder builder = new StringBuilder();
+      for (int j = 0; j < 1+i; j++) {
+        builder.append("a");
+      }
+      builder.append("\ud801\udc1cabc");
+      LowerCaseTokenizer tokenizer = new LowerCaseTokenizer(
+          TEST_VERSION_CURRENT, new StringReader(builder.toString()));
+      assertTokenStreamContents(tokenizer, new String[] {builder.toString().toLowerCase()});
+    }
+  }
+  
+  /*
+   * tests the max word length of 255 - the tokenizer will split at the 255th char no matter what happens
+   */
+  public void testMaxWordLength() throws IOException {
+    StringBuilder builder = new StringBuilder();
+
+    for (int i = 0; i < 255; i++) {
+      builder.append("A");
+    }
+    LowerCaseTokenizer tokenizer = new LowerCaseTokenizer(
+        TEST_VERSION_CURRENT, new StringReader(builder.toString() + builder.toString()));
+    assertTokenStreamContents(tokenizer, new String[] {builder.toString().toLowerCase(), builder.toString().toLowerCase()});
+  }
+  
+  /*
+   * tests the max word length of 255 with a surrogate pair at position 255
+   */
+  public void testMaxWordLengthWithSupplementary() throws IOException {
+    StringBuilder builder = new StringBuilder();
+
+    for (int i = 0; i < 254; i++) {
+      builder.append("A");
+    }
+    builder.append("\ud801\udc1c");
+    LowerCaseTokenizer tokenizer = new LowerCaseTokenizer(
+        TEST_VERSION_CURRENT, new StringReader(builder.toString() + builder.toString()));
+    assertTokenStreamContents(tokenizer, new String[] {builder.toString().toLowerCase(), builder.toString().toLowerCase()});
+  }
+
+  public void testLowerCaseTokenizer() throws IOException {
+    StringReader reader = new StringReader("Tokenizer \ud801\udc1ctest");
+    LowerCaseTokenizer tokenizer = new LowerCaseTokenizer(TEST_VERSION_CURRENT,
+        reader);
+    assertTokenStreamContents(tokenizer, new String[] { "tokenizer",
+        "\ud801\udc44test" });
+  }
+
+  public void testLowerCaseTokenizerBWCompat() throws IOException {
+    StringReader reader = new StringReader("Tokenizer \ud801\udc1ctest");
+    LowerCaseTokenizer tokenizer = new LowerCaseTokenizer(Version.LUCENE_30,
+        reader);
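+    // in 3.0 mode the tokenizer inspects single chars, so the surrogate code units are not letters
+    // and are dropped from the second token, leaving only "test"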
+    assertTokenStreamContents(tokenizer, new String[] { "tokenizer", "test" });
+  }
+
+  public void testWhitespaceTokenizer() throws IOException {
+    StringReader reader = new StringReader("Tokenizer \ud801\udc1ctest");
+    WhitespaceTokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT,
+        reader);
+    assertTokenStreamContents(tokenizer, new String[] { "Tokenizer",
+        "\ud801\udc1ctest" });
+  }
+
+  public void testWhitespaceTokenizerBWCompat() throws IOException {
+    StringReader reader = new StringReader("Tokenizer \ud801\udc1ctest");
+    WhitespaceTokenizer tokenizer = new WhitespaceTokenizer(Version.LUCENE_30,
+        reader);
+    assertTokenStreamContents(tokenizer, new String[] { "Tokenizer",
+        "\ud801\udc1ctest" });
+  }
+
+  public void testIsTokenCharCharInSubclass() {
+    new TestingCharTokenizer(Version.LUCENE_30, new StringReader(""));
+    try {
+      new TestingCharTokenizer(TEST_VERSION_CURRENT, new StringReader(""));
+      fail("version 3.1 is not permitted if char based method is implemented");
+    } catch (IllegalArgumentException e) {
+      // expected
+    }
+  }
+
+  public void testNormalizeCharInSubclass() {
+    new TestingCharTokenizerNormalize(Version.LUCENE_30, new StringReader(""));
+    try {
+      new TestingCharTokenizerNormalize(TEST_VERSION_CURRENT,
+          new StringReader(""));
+      fail("version 3.1 is not permitted if char based method is implemented");
+    } catch (IllegalArgumentException e) {
+      // expected
+    }
+  }
+
+  public void testNormalizeAndIsTokenCharCharInSubclass() {
+    new TestingCharTokenizerNormalizeIsTokenChar(Version.LUCENE_30,
+        new StringReader(""));
+    try {
+      new TestingCharTokenizerNormalizeIsTokenChar(TEST_VERSION_CURRENT,
+          new StringReader(""));
+      fail("version 3.1 is not permitted if char based method is implemented");
+    } catch (IllegalArgumentException e) {
+      // expected
+    }
+  }
+
+  static final class TestingCharTokenizer extends CharTokenizer {
+    public TestingCharTokenizer(Version matchVersion, Reader input) {
+      super(matchVersion, input);
+    }
+
+    @Override
+    protected boolean isTokenChar(int c) {
+      return Character.isLetter(c);
+    }
+
+    @Deprecated @Override
+    protected boolean isTokenChar(char c) {
+      return Character.isLetter(c);
+    }
+  }
+
+  static final class TestingCharTokenizerNormalize extends CharTokenizer {
+    public TestingCharTokenizerNormalize(Version matchVersion, Reader input) {
+      super(matchVersion, input);
+    }
+
+    @Deprecated @Override
+    protected char normalize(char c) {
+      return c;
+    }
+
+    @Override
+    protected int normalize(int c) {
+      return c;
+    }
+  }
+
+  static final class TestingCharTokenizerNormalizeIsTokenChar extends CharTokenizer {
+    public TestingCharTokenizerNormalizeIsTokenChar(Version matchVersion,
+        Reader input) {
+      super(matchVersion, input);
+    }
+
+    @Deprecated @Override
+    protected char normalize(char c) {
+      return c;
+    }
+
+    @Override
+    protected int normalize(int c) {
+      return c;
+    }
+
+    @Override
+    protected boolean isTokenChar(int c) {
+      return Character.isLetter(c);
+    }
+
+    @Deprecated @Override
+    protected boolean isTokenChar(char c) {
+      return Character.isLetter(c);
+    }
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/analysis/TestClassicAnalyzer.java b/lucene/backwards/src/test/org/apache/lucene/analysis/TestClassicAnalyzer.java
new file mode 100644
index 0000000..1987e46
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/analysis/TestClassicAnalyzer.java
@@ -0,0 +1,309 @@
+package org.apache.lucene.analysis;
+
+import org.apache.lucene.analysis.standard.ClassicAnalyzer;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.TermPositions;
+import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.Version;
+
+import java.io.IOException;
+import java.util.Arrays;
+
+
+/**
+ * Copyright 2004 The Apache Software Foundation
+ * <p/>
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class TestClassicAnalyzer extends BaseTokenStreamTestCase {
+
+  private Analyzer a = new ClassicAnalyzer(TEST_VERSION_CURRENT);
+
+  public void testMaxTermLength() throws Exception {
+    ClassicAnalyzer sa = new ClassicAnalyzer(TEST_VERSION_CURRENT);
+    sa.setMaxTokenLength(5);
+    assertAnalyzesTo(sa, "ab cd toolong xy z", new String[]{"ab", "cd", "xy", "z"});
+  }
+
+  public void testMaxTermLength2() throws Exception {
+    ClassicAnalyzer sa = new ClassicAnalyzer(TEST_VERSION_CURRENT);
+    assertAnalyzesTo(sa, "ab cd toolong xy z", new String[]{"ab", "cd", "toolong", "xy", "z"});
+    sa.setMaxTokenLength(5);
+    
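+    // "toolong" is now dropped but its position is kept, so "xy" gets a position increment of 2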
+    assertAnalyzesTo(sa, "ab cd toolong xy z", new String[]{"ab", "cd", "xy", "z"}, new int[]{1, 1, 2, 1});
+  }
+
+  public void testMaxTermLength3() throws Exception {
+    char[] chars = new char[255];
+    for(int i=0;i<255;i++)
+      chars[i] = 'a';
+    String longTerm = new String(chars, 0, 255);
+    
+    assertAnalyzesTo(a, "ab cd " + longTerm + " xy z", new String[]{"ab", "cd", longTerm, "xy", "z"});
+    assertAnalyzesTo(a, "ab cd " + longTerm + "a xy z", new String[]{"ab", "cd", "xy", "z"});
+  }
+
+  public void testAlphanumeric() throws Exception {
+    // alphanumeric tokens
+    assertAnalyzesTo(a, "B2B", new String[]{"b2b"});
+    assertAnalyzesTo(a, "2B", new String[]{"2b"});
+  }
+
+  public void testUnderscores() throws Exception {
+    // underscores are delimiters, but not in email addresses (below)
+    assertAnalyzesTo(a, "word_having_underscore", new String[]{"word", "having", "underscore"});
+    assertAnalyzesTo(a, "word_with_underscore_and_stopwords", new String[]{"word", "underscore", "stopwords"});
+  }
+
+  public void testDelimiters() throws Exception {
+    // other delimiters: "-", "/", ","
+    assertAnalyzesTo(a, "some-dashed-phrase", new String[]{"some", "dashed", "phrase"});
+    assertAnalyzesTo(a, "dogs,chase,cats", new String[]{"dogs", "chase", "cats"});
+    assertAnalyzesTo(a, "ac/dc", new String[]{"ac", "dc"});
+  }
+
+  public void testApostrophes() throws Exception {
+    // internal apostrophes: O'Reilly, you're, O'Reilly's
+    // possessives are actually removed by StandardFilter, not the tokenizer
+    assertAnalyzesTo(a, "O'Reilly", new String[]{"o'reilly"});
+    assertAnalyzesTo(a, "you're", new String[]{"you're"});
+    assertAnalyzesTo(a, "she's", new String[]{"she"});
+    assertAnalyzesTo(a, "Jim's", new String[]{"jim"});
+    assertAnalyzesTo(a, "don't", new String[]{"don't"});
+    assertAnalyzesTo(a, "O'Reilly's", new String[]{"o'reilly"});
+  }
+
+  public void testTSADash() throws Exception {
+    // t and s had been stopwords in Lucene <= 2.0, which made it impossible
+    // to correctly search for these terms:
+    assertAnalyzesTo(a, "s-class", new String[]{"s", "class"});
+    assertAnalyzesTo(a, "t-com", new String[]{"t", "com"});
+    // 'a' is still a stopword:
+    assertAnalyzesTo(a, "a-class", new String[]{"class"});
+  }
+
+  public void testCompanyNames() throws Exception {
+    // company names
+    assertAnalyzesTo(a, "AT&T", new String[]{"at&t"});
+    assertAnalyzesTo(a, "Excite@Home", new String[]{"excite@home"});
+  }
+
+  public void testLucene1140() throws Exception {
+    try {
+      ClassicAnalyzer analyzer = new ClassicAnalyzer(TEST_VERSION_CURRENT);
+      assertAnalyzesTo(analyzer, "www.nutch.org.", new String[]{ "www.nutch.org" }, new String[] { "<HOST>" });
+    } catch (NullPointerException e) {
+      fail("Should not throw an NPE and it did");
+    }
+
+  }
+
+  public void testDomainNames() throws Exception {
+    // Current lucene should not show the bug
+    ClassicAnalyzer a2 = new ClassicAnalyzer(TEST_VERSION_CURRENT);
+
+    // domain names
+    assertAnalyzesTo(a2, "www.nutch.org", new String[]{"www.nutch.org"});
+    //Notice the trailing .  See https://issues.apache.org/jira/browse/LUCENE-1068.
+    // the following should be recognized as HOST:
+    assertAnalyzesTo(a2, "www.nutch.org.", new String[]{ "www.nutch.org" }, new String[] { "<HOST>" });
+
+    // 2.3 should show the bug
+    a2 = new ClassicAnalyzer(org.apache.lucene.util.Version.LUCENE_23);
+    assertAnalyzesTo(a2, "www.nutch.org.", new String[]{ "wwwnutchorg" }, new String[] { "<ACRONYM>" });
+
+    // 2.4 should not show the bug
+    a2 = new ClassicAnalyzer(Version.LUCENE_24);
+    assertAnalyzesTo(a2, "www.nutch.org.", new String[]{ "www.nutch.org" }, new String[] { "<HOST>" });
+  }
+
+  public void testEMailAddresses() throws Exception {
+    // email addresses, possibly with underscores, periods, etc
+    assertAnalyzesTo(a, "test@example.com", new String[]{"test@example.com"});
+    assertAnalyzesTo(a, "first.lastname@example.com", new String[]{"first.lastname@example.com"});
+    assertAnalyzesTo(a, "first_lastname@example.com", new String[]{"first_lastname@example.com"});
+  }
+
+  public void testNumeric() throws Exception {
+    // floating point, serial, model numbers, ip addresses, etc.
+    // every other segment must have at least one digit
+    assertAnalyzesTo(a, "21.35", new String[]{"21.35"});
+    assertAnalyzesTo(a, "R2D2 C3PO", new String[]{"r2d2", "c3po"});
+    assertAnalyzesTo(a, "216.239.63.104", new String[]{"216.239.63.104"});
+    assertAnalyzesTo(a, "1-2-3", new String[]{"1-2-3"});
+    assertAnalyzesTo(a, "a1-b2-c3", new String[]{"a1-b2-c3"});
+    assertAnalyzesTo(a, "a1-b-c3", new String[]{"a1-b-c3"});
+  }
+
+  public void testTextWithNumbers() throws Exception {
+    // numbers
+    assertAnalyzesTo(a, "David has 5000 bones", new String[]{"david", "has", "5000", "bones"});
+  }
+
+  public void testVariousText() throws Exception {
+    // various
+    assertAnalyzesTo(a, "C embedded developers wanted", new String[]{"c", "embedded", "developers", "wanted"});
+    assertAnalyzesTo(a, "foo bar FOO BAR", new String[]{"foo", "bar", "foo", "bar"});
+    assertAnalyzesTo(a, "foo      bar .  FOO <> BAR", new String[]{"foo", "bar", "foo", "bar"});
+    assertAnalyzesTo(a, "\"QUOTED\" word", new String[]{"quoted", "word"});
+  }
+
+  public void testAcronyms() throws Exception {
+    // acronyms have their dots stripped
+    assertAnalyzesTo(a, "U.S.A.", new String[]{"usa"});
+  }
+
+  public void testCPlusPlusHash() throws Exception {
+    // It would be nice to change the grammar in StandardTokenizer.jj to make "C#" and "C++" end up as tokens.
+    assertAnalyzesTo(a, "C++", new String[]{"c"});
+    assertAnalyzesTo(a, "C#", new String[]{"c"});
+  }
+
+  public void testKorean() throws Exception {
+    // Korean words
+    assertAnalyzesTo(a, "안녕하세요 한글입니다", new String[]{"안녕하세요", "한글입니다"});
+  }
+
+  // Compliance with the "old" JavaCC-based analyzer, see:
+  // https://issues.apache.org/jira/browse/LUCENE-966#action_12516752
+
+  public void testComplianceFileName() throws Exception {
+    assertAnalyzesTo(a, "2004.jpg",
+            new String[]{"2004.jpg"},
+            new String[]{"<HOST>"});
+  }
+
+  public void testComplianceNumericIncorrect() throws Exception {
+    assertAnalyzesTo(a, "62.46",
+            new String[]{"62.46"},
+            new String[]{"<HOST>"});
+  }
+
+  public void testComplianceNumericLong() throws Exception {
+    assertAnalyzesTo(a, "978-0-94045043-1",
+            new String[]{"978-0-94045043-1"},
+            new String[]{"<NUM>"});
+  }
+
+  public void testComplianceNumericFile() throws Exception {
+    assertAnalyzesTo(
+            a,
+            "78academyawards/rules/rule02.html",
+            new String[]{"78academyawards/rules/rule02.html"},
+            new String[]{"<NUM>"});
+  }
+
+  public void testComplianceNumericWithUnderscores() throws Exception {
+    assertAnalyzesTo(
+            a,
+            "2006-03-11t082958z_01_ban130523_rtridst_0_ozabs",
+            new String[]{"2006-03-11t082958z_01_ban130523_rtridst_0_ozabs"},
+            new String[]{"<NUM>"});
+  }
+
+  public void testComplianceNumericWithDash() throws Exception {
+    assertAnalyzesTo(a, "mid-20th", new String[]{"mid-20th"},
+            new String[]{"<NUM>"});
+  }
+
+  public void testComplianceManyTokens() throws Exception {
+    assertAnalyzesTo(
+            a,
+            "/money.cnn.com/magazines/fortune/fortune_archive/2007/03/19/8402357/index.htm "
+                    + "safari-0-sheikh-zayed-grand-mosque.jpg",
+            new String[]{"money.cnn.com", "magazines", "fortune",
+                    "fortune", "archive/2007/03/19/8402357", "index.htm",
+                    "safari-0-sheikh", "zayed", "grand", "mosque.jpg"},
+            new String[]{"<HOST>", "<ALPHANUM>", "<ALPHANUM>",
+                    "<ALPHANUM>", "<NUM>", "<HOST>", "<NUM>", "<ALPHANUM>",
+                    "<ALPHANUM>", "<HOST>"});
+  }
+
+  public void testJava14BWCompatibility() throws Exception {
+    ClassicAnalyzer sa = new ClassicAnalyzer(Version.LUCENE_30);
+    assertAnalyzesTo(sa, "test\u02C6test", new String[] { "test", "test" });
+  }
+
+  /**
+   * Make sure we skip wicked long terms.
+   */
+  public void testWickedLongTerm() throws IOException {
+    RAMDirectory dir = new RAMDirectory();
+    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
+      TEST_VERSION_CURRENT, new ClassicAnalyzer(TEST_VERSION_CURRENT)));
+
+    char[] chars = new char[IndexWriter.MAX_TERM_LENGTH];
+    Arrays.fill(chars, 'x');
+    Document doc = new Document();
+    final String bigTerm = new String(chars);
+
+    // This produces a too-long term:
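+    // the "x" + bigTerm token is far longer than the analyzer's default max token length,
+    // so it should be dropped while the document itself is still indexed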
+    String contents = "abc xyz x" + bigTerm + " another term";
+    doc.add(new Field("content", contents, Field.Store.NO, Field.Index.ANALYZED));
+    writer.addDocument(doc);
+
+    // Make sure we can add another normal document
+    doc = new Document();
+    doc.add(new Field("content", "abc bbb ccc", Field.Store.NO, Field.Index.ANALYZED));
+    writer.addDocument(doc);
+    writer.close();
+
+    IndexReader reader = IndexReader.open(dir, true);
+
+    // Make sure all terms < max size were indexed
+    assertEquals(2, reader.docFreq(new Term("content", "abc")));
+    assertEquals(1, reader.docFreq(new Term("content", "bbb")));
+    assertEquals(1, reader.docFreq(new Term("content", "term")));
+    assertEquals(1, reader.docFreq(new Term("content", "another")));
+
+    // Make sure position is still incremented when
+    // massive term is skipped:
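+    // expected positions: abc=0, xyz=1, skipped long term=2, another=3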
+    TermPositions tps = reader.termPositions(new Term("content", "another"));
+    assertTrue(tps.next());
+    assertEquals(1, tps.freq());
+    assertEquals(3, tps.nextPosition());
+
+    // Make sure the doc that has the massive term is in
+    // the index:
+    assertEquals("document with wicked long term should is not in the index!", 2, reader.numDocs());
+
+    reader.close();
+
+    // Make sure we can add a document with exactly the
+    // maximum length term, and search on that term:
+    doc = new Document();
+    doc.add(new Field("content", bigTerm, Field.Store.NO, Field.Index.ANALYZED));
+    ClassicAnalyzer sa = new ClassicAnalyzer(TEST_VERSION_CURRENT);
+    sa.setMaxTokenLength(100000);
+    writer  = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, sa));
+    writer.addDocument(doc);
+    writer.close();
+    reader = IndexReader.open(dir, true);
+    assertEquals(1, reader.docFreq(new Term("content", bigTerm)));
+    reader.close();
+
+    dir.close();
+  }
+  
+  /** blast some random strings through the analyzer */
+  public void testRandomStrings() throws Exception {
+    checkRandomData(random, new ClassicAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/analysis/TestISOLatin1AccentFilter.java b/lucene/backwards/src/test/org/apache/lucene/analysis/TestISOLatin1AccentFilter.java
new file mode 100644
index 0000000..a7d2b95
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/analysis/TestISOLatin1AccentFilter.java
@@ -0,0 +1,110 @@
+package org.apache.lucene.analysis;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import java.io.StringReader;
+
+public class TestISOLatin1AccentFilter extends BaseTokenStreamTestCase {
+  public void testU() throws Exception {
+    TokenStream stream = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("Des mot clés À LA CHAÎNE À Á Â Ã Ä Å Æ Ç È É Ê Ë Ì Í Î Ï IJ Ð Ñ Ò Ó Ô Õ Ö Ø Œ Þ Ù Ú Û Ü Ý Ÿ à á â ã ä å æ ç è é ê ë ì í î ï ij ð ñ ò ó ô õ ö ø œ ß þ ù ú û ü ý ÿ fi fl"));
+    ISOLatin1AccentFilter filter = new ISOLatin1AccentFilter(stream);
+    CharTermAttribute termAtt = filter.getAttribute(CharTermAttribute.class);
+    assertTermEquals("Des", filter, termAtt);
+    assertTermEquals("mot", filter, termAtt);
+    assertTermEquals("cles", filter, termAtt);
+    assertTermEquals("A", filter, termAtt);
+    assertTermEquals("LA", filter, termAtt);
+    assertTermEquals("CHAINE", filter, termAtt);
+    assertTermEquals("A", filter, termAtt);
+    assertTermEquals("A", filter, termAtt);
+    assertTermEquals("A", filter, termAtt);
+    assertTermEquals("A", filter, termAtt);
+    assertTermEquals("A", filter, termAtt);
+    assertTermEquals("A", filter, termAtt);
+    assertTermEquals("AE", filter, termAtt);
+    assertTermEquals("C", filter, termAtt);
+    assertTermEquals("E", filter, termAtt);
+    assertTermEquals("E", filter, termAtt);
+    assertTermEquals("E", filter, termAtt);
+    assertTermEquals("E", filter, termAtt);
+    assertTermEquals("I", filter, termAtt);
+    assertTermEquals("I", filter, termAtt);
+    assertTermEquals("I", filter, termAtt);
+    assertTermEquals("I", filter, termAtt);
+    assertTermEquals("IJ", filter, termAtt);
+    assertTermEquals("D", filter, termAtt);
+    assertTermEquals("N", filter, termAtt);
+    assertTermEquals("O", filter, termAtt);
+    assertTermEquals("O", filter, termAtt);
+    assertTermEquals("O", filter, termAtt);
+    assertTermEquals("O", filter, termAtt);
+    assertTermEquals("O", filter, termAtt);
+    assertTermEquals("O", filter, termAtt);
+    assertTermEquals("OE", filter, termAtt);
+    assertTermEquals("TH", filter, termAtt);
+    assertTermEquals("U", filter, termAtt);
+    assertTermEquals("U", filter, termAtt);
+    assertTermEquals("U", filter, termAtt);
+    assertTermEquals("U", filter, termAtt);
+    assertTermEquals("Y", filter, termAtt);
+    assertTermEquals("Y", filter, termAtt);
+    assertTermEquals("a", filter, termAtt);
+    assertTermEquals("a", filter, termAtt);
+    assertTermEquals("a", filter, termAtt);
+    assertTermEquals("a", filter, termAtt);
+    assertTermEquals("a", filter, termAtt);
+    assertTermEquals("a", filter, termAtt);
+    assertTermEquals("ae", filter, termAtt);
+    assertTermEquals("c", filter, termAtt);
+    assertTermEquals("e", filter, termAtt);
+    assertTermEquals("e", filter, termAtt);
+    assertTermEquals("e", filter, termAtt);
+    assertTermEquals("e", filter, termAtt);
+    assertTermEquals("i", filter, termAtt);
+    assertTermEquals("i", filter, termAtt);
+    assertTermEquals("i", filter, termAtt);
+    assertTermEquals("i", filter, termAtt);
+    assertTermEquals("ij", filter, termAtt);
+    assertTermEquals("d", filter, termAtt);
+    assertTermEquals("n", filter, termAtt);
+    assertTermEquals("o", filter, termAtt);
+    assertTermEquals("o", filter, termAtt);
+    assertTermEquals("o", filter, termAtt);
+    assertTermEquals("o", filter, termAtt);
+    assertTermEquals("o", filter, termAtt);
+    assertTermEquals("o", filter, termAtt);
+    assertTermEquals("oe", filter, termAtt);
+    assertTermEquals("ss", filter, termAtt);
+    assertTermEquals("th", filter, termAtt);
+    assertTermEquals("u", filter, termAtt);
+    assertTermEquals("u", filter, termAtt);
+    assertTermEquals("u", filter, termAtt);
+    assertTermEquals("u", filter, termAtt);
+    assertTermEquals("y", filter, termAtt);
+    assertTermEquals("y", filter, termAtt);
+    assertTermEquals("fi", filter, termAtt);
+    assertTermEquals("fl", filter, termAtt);
+    assertFalse(filter.incrementToken());
+  }
+  
+  void assertTermEquals(String expected, TokenStream stream, CharTermAttribute termAtt) throws Exception {
+    assertTrue(stream.incrementToken());
+    assertEquals(expected, termAtt.toString());
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/analysis/TestKeywordAnalyzer.java b/lucene/backwards/src/test/org/apache/lucene/analysis/TestKeywordAnalyzer.java
new file mode 100644
index 0000000..6fe328a
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/analysis/TestKeywordAnalyzer.java
@@ -0,0 +1,103 @@
+package org.apache.lucene.analysis;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.StringReader;
+
+import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.TermDocs;
+import org.apache.lucene.queryParser.QueryParser;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.store.RAMDirectory;
+
+public class TestKeywordAnalyzer extends BaseTokenStreamTestCase {
+  
+  private RAMDirectory directory;
+  private IndexSearcher searcher;
+
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    directory = new RAMDirectory();
+    IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(
+        TEST_VERSION_CURRENT, new SimpleAnalyzer(
+        TEST_VERSION_CURRENT)));
+
+    Document doc = new Document();
+    doc.add(new Field("partnum", "Q36", Field.Store.YES, Field.Index.NOT_ANALYZED));
+    doc.add(new Field("description", "Illidium Space Modulator", Field.Store.YES, Field.Index.ANALYZED));
+    writer.addDocument(doc);
+
+    writer.close();
+
+    searcher = new IndexSearcher(directory, true);
+  }
+
+  public void testPerFieldAnalyzer() throws Exception {
+    PerFieldAnalyzerWrapper analyzer = new PerFieldAnalyzerWrapper(new SimpleAnalyzer(TEST_VERSION_CURRENT));
+    analyzer.addAnalyzer("partnum", new KeywordAnalyzer());
+
+    QueryParser queryParser = new QueryParser(TEST_VERSION_CURRENT, "description", analyzer);
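+    // "Q36" goes through the KeywordAnalyzer mapped to "partnum" and stays verbatim,
+    // while "SPACE" is handled by the default SimpleAnalyzer and gets lowercased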
+    Query query = queryParser.parse("partnum:Q36 AND SPACE");
+
+    ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals("Q36 kept as-is",
+              "+partnum:Q36 +space", query.toString("description"));
+    assertEquals("doc found!", 1, hits.length);
+  }
+
+  public void testMultipleDocument() throws Exception {
+    RAMDirectory dir = new RAMDirectory();
+    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new KeywordAnalyzer()));
+    Document doc = new Document();
+    doc.add(new Field("partnum", "Q36", Field.Store.YES, Field.Index.ANALYZED));
+    writer.addDocument(doc);
+    doc = new Document();
+    doc.add(new Field("partnum", "Q37", Field.Store.YES, Field.Index.ANALYZED));
+    writer.addDocument(doc);
+    writer.close();
+
+    IndexReader reader = IndexReader.open(dir, true);
+    TermDocs td = reader.termDocs(new Term("partnum", "Q36"));
+    assertTrue(td.next());
+    td = reader.termDocs(new Term("partnum", "Q37"));
+    assertTrue(td.next());
+  }
+
+  // LUCENE-1441
+  public void testOffsets() throws Exception {
+    TokenStream stream = new KeywordAnalyzer().tokenStream("field", new StringReader("abcd"));
+    OffsetAttribute offsetAtt = stream.addAttribute(OffsetAttribute.class);
+    assertTrue(stream.incrementToken());
+    assertEquals(0, offsetAtt.startOffset());
+    assertEquals(4, offsetAtt.endOffset());
+  }
+  
+  /** blast some random strings through the analyzer */
+  public void testRandomStrings() throws Exception {
+    checkRandomData(random, new KeywordAnalyzer(), 10000*RANDOM_MULTIPLIER);
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/analysis/TestKeywordMarkerFilter.java b/lucene/backwards/src/test/org/apache/lucene/analysis/TestKeywordMarkerFilter.java
new file mode 100644
index 0000000..825cf8f
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/analysis/TestKeywordMarkerFilter.java
@@ -0,0 +1,90 @@
+package org.apache.lucene.analysis;
+
+import java.io.IOException;
+import java.io.StringReader;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Locale;
+import java.util.Set;
+
+import org.apache.lucene.analysis.tokenattributes.KeywordAttribute;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.junit.Test;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Testcase for {@link KeywordMarkerFilter}
+ */
+public class TestKeywordMarkerFilter extends BaseTokenStreamTestCase {
+
+  @Test
+  public void testIncrementToken() throws IOException {
+    CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, 5, true);
+    set.add("lucenefox");
+    String[] output = new String[] { "the", "quick", "brown", "LuceneFox",
+        "jumps" };
+    assertTokenStreamContents(new LowerCaseFilterMock(
+        new KeywordMarkerFilter(new MockTokenizer(new StringReader(
+            "The quIck browN LuceneFox Jumps"), MockTokenizer.WHITESPACE, false), set)), output);
+    Set<String> jdkSet = new HashSet<String>();
+    jdkSet.add("LuceneFox");
+    assertTokenStreamContents(new LowerCaseFilterMock(
+        new KeywordMarkerFilter(new MockTokenizer(new StringReader(
+            "The quIck browN LuceneFox Jumps"), MockTokenizer.WHITESPACE, false), jdkSet)), output);
+    Set<?> set2 = set;
+    assertTokenStreamContents(new LowerCaseFilterMock(
+        new KeywordMarkerFilter(new MockTokenizer(new StringReader(
+            "The quIck browN LuceneFox Jumps"), MockTokenizer.WHITESPACE, false), set2)), output);
+  }
+
+  // LUCENE-2901
+  public void testComposition() throws Exception {   
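+    // both word sets are marked as keywords, so LowerCaseFilterMock must leave all four tokens in their original case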
+    TokenStream ts = new LowerCaseFilterMock(
+                     new KeywordMarkerFilter(
+                     new KeywordMarkerFilter(
+                     new MockTokenizer(new StringReader("Dogs Trees Birds Houses"), MockTokenizer.WHITESPACE, false),
+                     new HashSet<String>(Arrays.asList(new String[] { "Birds", "Houses" }))), 
+                     new HashSet<String>(Arrays.asList(new String[] { "Dogs", "Trees" }))));
+    
+    assertTokenStreamContents(ts, new String[] { "Dogs", "Trees", "Birds", "Houses" });
+  }
+  
+  public static final class LowerCaseFilterMock extends TokenFilter {
+
+    private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
+    private final KeywordAttribute keywordAttr = addAttribute(KeywordAttribute.class);
+
+    public LowerCaseFilterMock(TokenStream in) {
+      super(in);
+    }
+
+    @Override
+    public boolean incrementToken() throws IOException {
+      if (input.incrementToken()) {
+        if (!keywordAttr.isKeyword()) {
+          final String term = termAtt.toString().toLowerCase(Locale.ENGLISH);
+          termAtt.setEmpty().append(term);
+        }
+        return true;
+      }
+      return false;
+    }
+
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/analysis/TestLengthFilter.java b/lucene/backwards/src/test/org/apache/lucene/analysis/TestLengthFilter.java
new file mode 100644
index 0000000..142819f
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/analysis/TestLengthFilter.java
@@ -0,0 +1,44 @@
+package org.apache.lucene.analysis;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.StringReader;
+
+public class TestLengthFilter extends BaseTokenStreamTestCase {
+  
+  public void testFilterNoPosIncr() throws Exception {
+    TokenStream stream = new MockTokenizer(
+        new StringReader("short toolong evenmuchlongertext a ab toolong foo"), MockTokenizer.WHITESPACE, false);
+    LengthFilter filter = new LengthFilter(false, stream, 2, 6);
+    assertTokenStreamContents(filter,
+      new String[]{"short", "ab", "foo"},
+      new int[]{1, 1, 1}
+    );
+  }
+
+  public void testFilterWithPosIncr() throws Exception {
+    TokenStream stream = new MockTokenizer(
+        new StringReader("short toolong evenmuchlongertext a ab toolong foo"), MockTokenizer.WHITESPACE, false);
+    LengthFilter filter = new LengthFilter(true, stream, 2, 6);
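+    // "toolong", "evenmuchlongertext" and "a" are dropped before "ab" (increment 4), and the second "toolong" before "foo" (increment 2)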
+    assertTokenStreamContents(filter,
+      new String[]{"short", "ab", "foo"},
+      new int[]{1, 4, 2}
+    );
+  }
+
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/analysis/TestMappingCharFilter.java b/lucene/backwards/src/test/org/apache/lucene/analysis/TestMappingCharFilter.java
new file mode 100644
index 0000000..d5f10ef
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/analysis/TestMappingCharFilter.java
@@ -0,0 +1,161 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.lucene.analysis;
+
+import java.io.StringReader;
+
+public class TestMappingCharFilter extends BaseTokenStreamTestCase {
+
+  NormalizeCharMap normMap;
+
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    normMap = new NormalizeCharMap();
+
+    normMap.add( "aa", "a" );
+    normMap.add( "bbb", "b" );
+    normMap.add( "cccc", "cc" );
+
+    normMap.add( "h", "i" );
+    normMap.add( "j", "jj" );
+    normMap.add( "k", "kkk" );
+    normMap.add( "ll", "llll" );
+
+    normMap.add( "empty", "" );
+  }
+
+  public void testReaderReset() throws Exception {
+    CharStream cs = new MappingCharFilter( normMap, new StringReader( "x" ) );
+    char[] buf = new char[10];
+    int len = cs.read(buf, 0, 10);
+    assertEquals( 1, len );
+    assertEquals( 'x', buf[0]) ;
+    len = cs.read(buf, 0, 10);
+    assertEquals( -1, len );
+
+    // rewind
+    cs.reset();
+    len = cs.read(buf, 0, 10);
+    assertEquals( 1, len );
+    assertEquals( 'x', buf[0]) ;
+  }
+
+  public void testNothingChange() throws Exception {
+    CharStream cs = new MappingCharFilter( normMap, new StringReader( "x" ) );
+    TokenStream ts = new MockTokenizer(cs, MockTokenizer.WHITESPACE, false);
+    assertTokenStreamContents(ts, new String[]{"x"}, new int[]{0}, new int[]{1});
+  }
+
+  public void test1to1() throws Exception {
+    CharStream cs = new MappingCharFilter( normMap, new StringReader( "h" ) );
+    TokenStream ts = new MockTokenizer(cs, MockTokenizer.WHITESPACE, false);
+    assertTokenStreamContents(ts, new String[]{"i"}, new int[]{0}, new int[]{1});
+  }
+
+  public void test1to2() throws Exception {
+    CharStream cs = new MappingCharFilter( normMap, new StringReader( "j" ) );
+    TokenStream ts = new MockTokenizer(cs, MockTokenizer.WHITESPACE, false);
+    assertTokenStreamContents(ts, new String[]{"jj"}, new int[]{0}, new int[]{1});
+  }
+
+  public void test1to3() throws Exception {
+    CharStream cs = new MappingCharFilter( normMap, new StringReader( "k" ) );
+    TokenStream ts = new MockTokenizer(cs, MockTokenizer.WHITESPACE, false);
+    assertTokenStreamContents(ts, new String[]{"kkk"}, new int[]{0}, new int[]{1});
+  }
+
+  public void test2to4() throws Exception {
+    CharStream cs = new MappingCharFilter( normMap, new StringReader( "ll" ) );
+    TokenStream ts = new MockTokenizer(cs, MockTokenizer.WHITESPACE, false);
+    assertTokenStreamContents(ts, new String[]{"llll"}, new int[]{0}, new int[]{2});
+  }
+
+  public void test2to1() throws Exception {
+    CharStream cs = new MappingCharFilter( normMap, new StringReader( "aa" ) );
+    TokenStream ts = new MockTokenizer(cs, MockTokenizer.WHITESPACE, false);
+    assertTokenStreamContents(ts, new String[]{"a"}, new int[]{0}, new int[]{2});
+  }
+
+  public void test3to1() throws Exception {
+    CharStream cs = new MappingCharFilter( normMap, new StringReader( "bbb" ) );
+    TokenStream ts = new MockTokenizer(cs, MockTokenizer.WHITESPACE, false);
+    assertTokenStreamContents(ts, new String[]{"b"}, new int[]{0}, new int[]{3});
+  }
+
+  public void test4to2() throws Exception {
+    CharStream cs = new MappingCharFilter( normMap, new StringReader( "cccc" ) );
+    TokenStream ts = new MockTokenizer(cs, MockTokenizer.WHITESPACE, false);
+    assertTokenStreamContents(ts, new String[]{"cc"}, new int[]{0}, new int[]{4});
+  }
+
+  public void test5to0() throws Exception {
+    CharStream cs = new MappingCharFilter( normMap, new StringReader( "empty" ) );
+    TokenStream ts = new MockTokenizer(cs, MockTokenizer.WHITESPACE, false);
+    assertTokenStreamContents(ts, new String[0]);
+  }
+
+  //
+  //                1111111111222
+  //      01234567890123456789012
+  //(in)  h i j k ll cccc bbb aa
+  //
+  //                1111111111222
+  //      01234567890123456789012
+  //(out) i i jj kkk llll cc b a
+  //
+  //    h, 0, 1 =>    i, 0, 1
+  //    i, 2, 3 =>    i, 2, 3
+  //    j, 4, 5 =>   jj, 4, 5
+  //    k, 6, 7 =>  kkk, 6, 7
+  //   ll, 8,10 => llll, 8,10
+  // cccc,11,15 =>   cc,11,15
+  //  bbb,16,19 =>    b,16,19
+  //   aa,20,22 =>    a,20,22
+  //
+  public void testTokenStream() throws Exception {
+    CharStream cs = new MappingCharFilter( normMap, CharReader.get( new StringReader( "h i j k ll cccc bbb aa" ) ) );
+    TokenStream ts = new MockTokenizer(cs, MockTokenizer.WHITESPACE, false);
+    assertTokenStreamContents(ts,
+      new String[]{"i","i","jj","kkk","llll","cc","b","a"},
+      new int[]{0,2,4,6,8,11,16,20},
+      new int[]{1,3,5,7,10,15,19,22}
+    );
+  }
+
+  //
+  //
+  //        0123456789
+  //(in)    aaaa ll h
+  //(out-1) aa llll i
+  //(out-2) a llllllll i
+  //
+  // aaaa,0,4 => a,0,4
+  //   ll,5,7 => llllllll,5,7
+  //    h,8,9 => i,8,9
+  public void testChained() throws Exception {
+    CharStream cs = new MappingCharFilter( normMap,
+        new MappingCharFilter( normMap, CharReader.get( new StringReader( "aaaa ll h" ) ) ) );
+    TokenStream ts = new MockTokenizer(cs, MockTokenizer.WHITESPACE, false);
+    assertTokenStreamContents(ts,
+      new String[]{"a","llllllll","i"},
+      new int[]{0,5,8},
+      new int[]{4,7,9}
+    );
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/analysis/TestMockAnalyzer.java b/lucene/backwards/src/test/org/apache/lucene/analysis/TestMockAnalyzer.java
new file mode 100644
index 0000000..4715993
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/analysis/TestMockAnalyzer.java
@@ -0,0 +1,90 @@
+package org.apache.lucene.analysis;
+
+import java.io.StringReader;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class TestMockAnalyzer extends BaseTokenStreamTestCase {
+
+  /** Test a configuration that behaves a lot like WhitespaceAnalyzer */
+  public void testWhitespace() throws Exception {
+    Analyzer a = new MockAnalyzer(random);
+    assertAnalyzesTo(a, "A bc defg hiJklmn opqrstuv wxy z ",
+        new String[] { "a", "bc", "defg", "hijklmn", "opqrstuv", "wxy", "z" });
+    assertAnalyzesToReuse(a, "aba cadaba shazam",
+        new String[] { "aba", "cadaba", "shazam" });
+    assertAnalyzesToReuse(a, "break on whitespace",
+        new String[] { "break", "on", "whitespace" });
+  }
+  
+  /** Test a configuration that behaves a lot like SimpleAnalyzer */
+  public void testSimple() throws Exception {
+    Analyzer a = new MockAnalyzer(random, MockTokenizer.SIMPLE, true);
+    assertAnalyzesTo(a, "a-bc123 defg+hijklmn567opqrstuv78wxy_z ",
+        new String[] { "a", "bc", "defg", "hijklmn", "opqrstuv", "wxy", "z" });
+    assertAnalyzesToReuse(a, "aba4cadaba-Shazam",
+        new String[] { "aba", "cadaba", "shazam" });
+    assertAnalyzesToReuse(a, "break+on/Letters",
+        new String[] { "break", "on", "letters" });
+  }
+  
+  /** Test a configuration that behaves a lot like KeywordAnalyzer */
+  public void testKeyword() throws Exception {
+    Analyzer a = new MockAnalyzer(random, MockTokenizer.KEYWORD, false);
+    assertAnalyzesTo(a, "a-bc123 defg+hijklmn567opqrstuv78wxy_z ",
+        new String[] { "a-bc123 defg+hijklmn567opqrstuv78wxy_z " });
+    assertAnalyzesToReuse(a, "aba4cadaba-Shazam",
+        new String[] { "aba4cadaba-Shazam" });
+    assertAnalyzesToReuse(a, "break+on/Nothing",
+        new String[] { "break+on/Nothing" });
+  }
+  
+  /** Test a configuration that behaves a lot like StopAnalyzer */
+  public void testStop() throws Exception {
+    Analyzer a = new MockAnalyzer(random, MockTokenizer.SIMPLE, true, (CharArraySet) StopAnalyzer.ENGLISH_STOP_WORDS_SET, true);
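+    // expected position increments reflect the removed stop words ("the" before "quick", "a" before "fox")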
+    assertAnalyzesTo(a, "the quick brown a fox",
+        new String[] { "quick", "brown", "fox" },
+        new int[] { 2, 1, 2 });
+    
+    // disable positions
+    a = new MockAnalyzer(random, MockTokenizer.SIMPLE, true, (CharArraySet) StopAnalyzer.ENGLISH_STOP_WORDS_SET, false);
+    assertAnalyzesTo(a, "the quick brown a fox",
+        new String[] { "quick", "brown", "fox" },
+        new int[] { 1, 1, 1 });
+  }
+  
+  public void testLUCENE_3042() throws Exception {
+    String testString = "t";
+    
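+    // fully consume one token stream, then verify the analyzer's reusable stream still produces the expected token (LUCENE-3042)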
+    Analyzer analyzer = new MockAnalyzer(random);
+    TokenStream stream = analyzer.reusableTokenStream("dummy", new StringReader(testString));
+    stream.reset();
+    while (stream.incrementToken()) {
+      // consume
+    }
+    stream.end();
+    stream.close();
+    
+    assertAnalyzesToReuse(analyzer, testString, new String[] { "t" });
+  }
+
+  /** blast some random strings through the analyzer */
+  public void testRandomStrings() throws Exception {
+    checkRandomData(random, new MockAnalyzer(random), atLeast(1000));
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/analysis/TestNumericTokenStream.java b/lucene/backwards/src/test/org/apache/lucene/analysis/TestNumericTokenStream.java
new file mode 100644
index 0000000..6571170
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/analysis/TestNumericTokenStream.java
@@ -0,0 +1,73 @@
+package org.apache.lucene.analysis;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.util.NumericUtils;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
+
+public class TestNumericTokenStream extends BaseTokenStreamTestCase {
+
+  static final long lvalue = 4573245871874382L;
+  static final int ivalue = 123456;
+
+  public void testLongStream() throws Exception {
+    final NumericTokenStream stream=new NumericTokenStream().setLongValue(lvalue);
+    // use getAttribute to test if the attributes really exist; if they do not, an IllegalArgumentException will be thrown
+    final CharTermAttribute termAtt = stream.getAttribute(CharTermAttribute.class);
+    final TypeAttribute typeAtt = stream.getAttribute(TypeAttribute.class);
+    for (int shift=0; shift<64; shift+=NumericUtils.PRECISION_STEP_DEFAULT) {
+      assertTrue("New token is available", stream.incrementToken());
+      assertEquals("Term is correctly encoded", NumericUtils.longToPrefixCoded(lvalue, shift), termAtt.toString());
+      assertEquals("Type correct", (shift == 0) ? NumericTokenStream.TOKEN_TYPE_FULL_PREC : NumericTokenStream.TOKEN_TYPE_LOWER_PREC, typeAtt.type());
+    }
+    assertFalse("No more tokens available", stream.incrementToken());
+  }
+
+  public void testIntStream() throws Exception {
+    final NumericTokenStream stream=new NumericTokenStream().setIntValue(ivalue);
+    // use getAttribute to test if the attributes really exist; if they do not, an IllegalArgumentException will be thrown
+    final CharTermAttribute termAtt = stream.getAttribute(CharTermAttribute.class);
+    final TypeAttribute typeAtt = stream.getAttribute(TypeAttribute.class);
+    for (int shift=0; shift<32; shift+=NumericUtils.PRECISION_STEP_DEFAULT) {
+      assertTrue("New token is available", stream.incrementToken());
+      assertEquals("Term is correctly encoded", NumericUtils.intToPrefixCoded(ivalue, shift), termAtt.toString());
+      assertEquals("Type correct", (shift == 0) ? NumericTokenStream.TOKEN_TYPE_FULL_PREC : NumericTokenStream.TOKEN_TYPE_LOWER_PREC, typeAtt.type());
+    }
+    assertFalse("No more tokens available", stream.incrementToken());
+  }
+  
+  public void testNotInitialized() throws Exception {
+    final NumericTokenStream stream=new NumericTokenStream();
+    
+    try {
+      stream.reset();
+      fail("reset() should not succeed.");
+    } catch (IllegalStateException e) {
+      // pass
+    }
+
+    try {
+      stream.incrementToken();
+      fail("incrementToken() should not succeed.");
+    } catch (IllegalStateException e) {
+      // pass
+    }
+  }
+  
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/analysis/TestPerFieldAnalzyerWrapper.java b/lucene/backwards/src/test/org/apache/lucene/analysis/TestPerFieldAnalzyerWrapper.java
new file mode 100644
index 0000000..790e6e8
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/analysis/TestPerFieldAnalzyerWrapper.java
@@ -0,0 +1,48 @@
+package org.apache.lucene.analysis;
+
+import java.io.StringReader;
+
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class TestPerFieldAnalzyerWrapper extends BaseTokenStreamTestCase {
+  public void testPerField() throws Exception {
+    String text = "Qwerty";
+    PerFieldAnalyzerWrapper analyzer =
+              new PerFieldAnalyzerWrapper(new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
+    analyzer.addAnalyzer("special", new SimpleAnalyzer(TEST_VERSION_CURRENT));
+
+    TokenStream tokenStream = analyzer.tokenStream("field",
+                                            new StringReader(text));
+    CharTermAttribute termAtt = tokenStream.getAttribute(CharTermAttribute.class);
+
+    assertTrue(tokenStream.incrementToken());
+    assertEquals("WhitespaceAnalyzer does not lowercase",
+                 "Qwerty",
+                 termAtt.toString());
+
+    tokenStream = analyzer.tokenStream("special",
+                                            new StringReader(text));
+    termAtt = tokenStream.getAttribute(CharTermAttribute.class);
+    assertTrue(tokenStream.incrementToken());
+    assertEquals("SimpleAnalyzer lowercases",
+                 "qwerty",
+                 termAtt.toString());
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/analysis/TestPorterStemFilter.java b/lucene/backwards/src/test/org/apache/lucene/analysis/TestPorterStemFilter.java
new file mode 100644
index 0000000..b0347c8
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/analysis/TestPorterStemFilter.java
@@ -0,0 +1,65 @@
+package org.apache.lucene.analysis;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.io.Reader;
+import java.io.StringReader;
+
+import org.apache.lucene.analysis.BaseTokenStreamTestCase;
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.MockTokenizer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.Tokenizer;
+
+import static org.apache.lucene.analysis.VocabularyAssert.*;
+
+/**
+ * Test the PorterStemFilter with Martin Porter's test data.
+ */
+public class TestPorterStemFilter extends BaseTokenStreamTestCase {  
+  Analyzer a = new ReusableAnalyzerBase() {
+    @Override
+    protected TokenStreamComponents createComponents(String fieldName,
+        Reader reader) {
+      Tokenizer t = new MockTokenizer(reader, MockTokenizer.KEYWORD, false);
+      return new TokenStreamComponents(t, new PorterStemFilter(t));
+    }
+  };
+
+  /**
+   * Run the stemmer against all strings in voc.txt
+   * The output should be the same as the string in output.txt
+   */
+  public void testPorterStemFilter() throws Exception {
+    assertVocabulary(a, getDataFile("porterTestData.zip"), "voc.txt", "output.txt");
+  }
+  
+  public void testWithKeywordAttribute() throws IOException {
+    CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, 1, true);
+    set.add("yourselves");
+    Tokenizer tokenizer = new MockTokenizer(new StringReader("yourselves yours"), MockTokenizer.WHITESPACE, false);
+    TokenStream filter = new PorterStemFilter(new KeywordMarkerFilter(tokenizer, set));   
+    assertTokenStreamContents(filter, new String[] {"yourselves", "your"});
+  }
+  
+  /** blast some random strings through the analyzer */
+  public void testRandomStrings() throws Exception {
+    checkRandomData(random, a, 10000*RANDOM_MULTIPLIER);
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/analysis/TestStandardAnalyzer.java b/lucene/backwards/src/test/org/apache/lucene/analysis/TestStandardAnalyzer.java
new file mode 100644
index 0000000..485ceef
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/analysis/TestStandardAnalyzer.java
@@ -0,0 +1,242 @@
+package org.apache.lucene.analysis;
+
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.analysis.standard.StandardTokenizer;
+import org.apache.lucene.util.Version;
+
+import java.io.IOException;
+import java.io.Reader;
+import java.io.StringReader;
+import java.util.Arrays;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class TestStandardAnalyzer extends BaseTokenStreamTestCase {
+  
+  public void testHugeDoc() throws IOException {
+    StringBuilder sb = new StringBuilder();
+    char whitespace[] = new char[4094];
+    Arrays.fill(whitespace, ' ');
+    sb.append(whitespace);
+    sb.append("testing 1234");
+    String input = sb.toString();
+    StandardTokenizer tokenizer = new StandardTokenizer(TEST_VERSION_CURRENT, new StringReader(input));
+    BaseTokenStreamTestCase.assertTokenStreamContents(tokenizer, new String[] { "testing", "1234" });
+  }
+
+  private Analyzer a = new ReusableAnalyzerBase() {
+    @Override
+    protected TokenStreamComponents createComponents
+      (String fieldName, Reader reader) {
+
+      Tokenizer tokenizer = new StandardTokenizer(TEST_VERSION_CURRENT, reader);
+      return new TokenStreamComponents(tokenizer);
+    }
+  };
+
+  public void testArmenian() throws Exception {
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "Վիքիպեդիայի 13 միլիոն հոդվածները (4,600` հայերեն վիքիպեդիայում) գրվել են կամավորների կողմից ու համարյա բոլոր հոդվածները կարող է խմբագրել ցանկաց մարդ ով կարող է բացել Վիքիպեդիայի կայքը։",
+        new String[] { "Վիքիպեդիայի", "13", "միլիոն", "հոդվածները", "4,600", "հայերեն", "վիքիպեդիայում", "գրվել", "են", "կամավորների", "կողմից", 
+        "ու", "համարյա", "բոլոր", "հոդվածները", "կարող", "է", "խմբագրել", "ցանկաց", "մարդ", "ով", "կարող", "է", "բացել", "Վիքիպեդիայի", "կայքը" } );
+  }
+  
+  public void testAmharic() throws Exception {
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "ዊኪፔድያ የባለ ብዙ ቋንቋ የተሟላ ትክክለኛና ነጻ መዝገበ ዕውቀት (ኢንሳይክሎፒዲያ) ነው። ማንኛውም",
+        new String[] { "ዊኪፔድያ", "የባለ", "ብዙ", "ቋንቋ", "የተሟላ", "ትክክለኛና", "ነጻ", "መዝገበ", "ዕውቀት", "ኢንሳይክሎፒዲያ", "ነው", "ማንኛውም" } );
+  }
+  
+  public void testArabic() throws Exception {
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "الفيلم الوثائقي الأول عن ويكيبيديا يسمى \"الحقيقة بالأرقام: قصة ويكيبيديا\" (بالإنجليزية: Truth in Numbers: The Wikipedia Story)، سيتم إطلاقه في 2008.",
+        new String[] { "الفيلم", "الوثائقي", "الأول", "عن", "ويكيبيديا", "يسمى", "الحقيقة", "بالأرقام", "قصة", "ويكيبيديا",
+        "بالإنجليزية", "Truth", "in", "Numbers", "The", "Wikipedia", "Story", "سيتم", "إطلاقه", "في", "2008" } ); 
+  }
+  
+  public void testAramaic() throws Exception {
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "ܘܝܩܝܦܕܝܐ (ܐܢܓܠܝܐ: Wikipedia) ܗܘ ܐܝܢܣܩܠܘܦܕܝܐ ܚܐܪܬܐ ܕܐܢܛܪܢܛ ܒܠܫܢ̈ܐ ܣܓܝܐ̈ܐ܂ ܫܡܗ ܐܬܐ ܡܢ ܡ̈ܠܬܐ ܕ\"ܘܝܩܝ\" ܘ\"ܐܝܢܣܩܠܘܦܕܝܐ\"܀",
+        new String[] { "ܘܝܩܝܦܕܝܐ", "ܐܢܓܠܝܐ", "Wikipedia", "ܗܘ", "ܐܝܢܣܩܠܘܦܕܝܐ", "ܚܐܪܬܐ", "ܕܐܢܛܪܢܛ", "ܒܠܫܢ̈ܐ", "ܣܓܝܐ̈ܐ", "ܫܡܗ",
+        "ܐܬܐ", "ܡܢ", "ܡ̈ܠܬܐ", "ܕ", "ܘܝܩܝ", "ܘ", "ܐܝܢܣܩܠܘܦܕܝܐ"});
+  }
+  
+  public void testBengali() throws Exception {
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "এই বিশ্বকোষ পরিচালনা করে উইকিমিডিয়া ফাউন্ডেশন (একটি অলাভজনক সংস্থা)। উইকিপিডিয়ার শুরু ১৫ জানুয়ারি, ২০০১ সালে। এখন পর্যন্ত ২০০টিরও বেশী ভাষায় উইকিপিডিয়া রয়েছে।",
+        new String[] { "এই", "বিশ্বকোষ", "পরিচালনা", "করে", "উইকিমিডিয়া", "ফাউন্ডেশন", "একটি", "অলাভজনক", "সংস্থা", "উইকিপিডিয়ার",
+        "শুরু", "১৫", "জানুয়ারি", "২০০১", "সালে", "এখন", "পর্যন্ত", "২০০টিরও", "বেশী", "ভাষায়", "উইকিপিডিয়া", "রয়েছে" });
+  }
+  
+  public void testFarsi() throws Exception {
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "ویکی پدیای انگلیسی در تاریخ ۲۵ دی ۱۳۷۹ به صورت مکملی برای دانشنامهٔ تخصصی نوپدیا نوشته شد.",
+        new String[] { "ویکی", "پدیای", "انگلیسی", "در", "تاریخ", "۲۵", "دی", "۱۳۷۹", "به", "صورت", "مکملی",
+        "برای", "دانشنامهٔ", "تخصصی", "نوپدیا", "نوشته", "شد" });
+  }
+  
+  public void testGreek() throws Exception {
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "Γράφεται σε συνεργασία από εθελοντές με το λογισμικό wiki, κάτι που σημαίνει ότι άρθρα μπορεί να προστεθούν ή να αλλάξουν από τον καθένα.",
+        new String[] { "Γράφεται", "σε", "συνεργασία", "από", "εθελοντές", "με", "το", "λογισμικό", "wiki", "κάτι", "που",
+        "σημαίνει", "ότι", "άρθρα", "μπορεί", "να", "προστεθούν", "ή", "να", "αλλάξουν", "από", "τον", "καθένα" });
+  }
+
+  public void testThai() throws Exception {
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "การที่ได้ต้องแสดงว่างานดี. แล้วเธอจะไปไหน? ๑๒๓๔",
+        new String[] { "การที่ได้ต้องแสดงว่างานดี", "แล้วเธอจะไปไหน", "๑๒๓๔" });
+  }
+  
+  public void testLao() throws Exception {
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "ສາທາລະນະລັດ ປະຊາທິປະໄຕ ປະຊາຊົນລາວ", 
+        new String[] { "ສາທາລະນະລັດ", "ປະຊາທິປະໄຕ", "ປະຊາຊົນລາວ" });
+  }
+  
+  public void testTibetan() throws Exception {
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "སྣོན་མཛོད་དང་ལས་འདིས་བོད་ཡིག་མི་ཉམས་གོང་འཕེལ་དུ་གཏོང་བར་ཧ་ཅང་དགེ་མཚན་མཆིས་སོ། །",
+                     new String[] { "སྣོན", "མཛོད", "དང", "ལས", "འདིས", "བོད", "ཡིག", 
+                                    "མི", "ཉམས", "གོང", "འཕེལ", "དུ", "གཏོང", "བར", 
+                                    "ཧ", "ཅང", "དགེ", "མཚན", "མཆིས", "སོ" });
+  }
+  
+  /*
+   * For chinese, tokenize as char (these can later form bigrams or whatever)
+   */
+  public void testChinese() throws Exception {
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "我是中国人。 1234 Tests ",
+        new String[] { "我", "是", "中", "国", "人", "1234", "Tests"});
+  }
+  
+  public void testEmpty() throws Exception {
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "", new String[] {});
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, ".", new String[] {});
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, " ", new String[] {});
+  }
+  
+  /* test various jira issues this analyzer is related to */
+  
+  public void testLUCENE1545() throws Exception {
+    /*
+     * Standard analyzer does not correctly tokenize the combining character U+0364 COMBINING LATIN SMALL LETTER E.
+     * The word "moͤchte" is incorrectly tokenized into "mo" and "chte"; the combining character is lost.
+     * The expected result is a single token "moͤchte".
+     */
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "moͤchte", new String[] { "moͤchte" }); 
+  }
+  
+  /* Tests from StandardAnalyzer, just to show behavior is similar */
+  public void testAlphanumericSA() throws Exception {
+    // alphanumeric tokens
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "B2B", new String[]{"B2B"});
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "2B", new String[]{"2B"});
+  }
+
+  public void testDelimitersSA() throws Exception {
+    // other delimiters: "-", "/", ","
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "some-dashed-phrase", new String[]{"some", "dashed", "phrase"});
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "dogs,chase,cats", new String[]{"dogs", "chase", "cats"});
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "ac/dc", new String[]{"ac", "dc"});
+  }
+
+  public void testApostrophesSA() throws Exception {
+    // internal apostrophes: O'Reilly, you're, O'Reilly's
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "O'Reilly", new String[]{"O'Reilly"});
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "you're", new String[]{"you're"});
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "she's", new String[]{"she's"});
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "Jim's", new String[]{"Jim's"});
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "don't", new String[]{"don't"});
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "O'Reilly's", new String[]{"O'Reilly's"});
+  }
+
+  public void testNumericSA() throws Exception {
+    // floating point, serial, model numbers, ip addresses, etc.
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "21.35", new String[]{"21.35"});
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "R2D2 C3PO", new String[]{"R2D2", "C3PO"});
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "216.239.63.104", new String[]{"216.239.63.104"});
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "216.239.63.104", new String[]{"216.239.63.104"});
+  }
+
+  public void testTextWithNumbersSA() throws Exception {
+    // numbers
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "David has 5000 bones", new String[]{"David", "has", "5000", "bones"});
+  }
+
+  public void testVariousTextSA() throws Exception {
+    // various
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "C embedded developers wanted", new String[]{"C", "embedded", "developers", "wanted"});
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "foo bar FOO BAR", new String[]{"foo", "bar", "FOO", "BAR"});
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "foo      bar .  FOO <> BAR", new String[]{"foo", "bar", "FOO", "BAR"});
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "\"QUOTED\" word", new String[]{"QUOTED", "word"});
+  }
+
+  public void testKoreanSA() throws Exception {
+    // Korean words
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "안녕하세요 한글입니다", new String[]{"안녕하세요", "한글입니다"});
+  }
+  
+  public void testOffsets() throws Exception {
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "David has 5000 bones", 
+        new String[] {"David", "has", "5000", "bones"},
+        new int[] {0, 6, 10, 15},
+        new int[] {5, 9, 14, 20});
+  }
+  
+  public void testTypes() throws Exception {
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "David has 5000 bones", 
+        new String[] {"David", "has", "5000", "bones"},
+        new String[] { "<ALPHANUM>", "<ALPHANUM>", "<NUM>", "<ALPHANUM>" });
+  }
+  
+  public void testUnicodeWordBreaks() throws Exception {
+    WordBreakTestUnicode_6_0_0 wordBreakTest = new WordBreakTestUnicode_6_0_0();
+    wordBreakTest.test(a);
+  }
+  
+  public void testSupplementary() throws Exception {
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "𩬅艱鍟䇹愯瀛", 
+        new String[] {"𩬅", "艱", "鍟", "䇹", "愯", "瀛"},
+        new String[] { "<IDEOGRAPHIC>", "<IDEOGRAPHIC>", "<IDEOGRAPHIC>", "<IDEOGRAPHIC>", "<IDEOGRAPHIC>", "<IDEOGRAPHIC>" });
+  }
+  
+  public void testKorean() throws Exception {
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "훈민정음",
+        new String[] { "훈민정음" },
+        new String[] { "<HANGUL>" });
+  }
+  
+  public void testJapanese() throws Exception {
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "仮名遣い カタカナ",
+        new String[] { "仮", "名", "遣", "い", "カタカナ" },
+        new String[] { "<IDEOGRAPHIC>", "<IDEOGRAPHIC>", "<IDEOGRAPHIC>", "<HIRAGANA>", "<KATAKANA>" });
+  }
+  
+  public void testCombiningMarks() throws Exception {
+    checkOneTerm(a, "ざ", "ざ"); // hiragana
+    checkOneTerm(a, "ザ", "ザ"); // katakana
+    checkOneTerm(a, "壹゙", "壹゙"); // ideographic
+    checkOneTerm(a, "아゙",  "아゙"); // hangul
+  }
+  
+  /** @deprecated remove this and sophisticated backwards layer in 5.0 */
+  @Deprecated
+  public void testCombiningMarksBackwards() throws Exception {
+    Analyzer a = new StandardAnalyzer(Version.LUCENE_33);
+    checkOneTerm(a, "ざ", "さ"); // hiragana Bug
+    checkOneTerm(a, "ザ", "ザ"); // katakana Works
+    checkOneTerm(a, "壹゙", "壹"); // ideographic Bug
+    checkOneTerm(a, "아゙",  "아゙"); // hangul Works
+  }
+
+  /** blast some random strings through the analyzer */
+  public void testRandomStrings() throws Exception {
+    checkRandomData(random, new StandardAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/analysis/TestStopAnalyzer.java b/lucene/backwards/src/test/org/apache/lucene/analysis/TestStopAnalyzer.java
new file mode 100644
index 0000000..e3e0b76
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/analysis/TestStopAnalyzer.java
@@ -0,0 +1,97 @@
+package org.apache.lucene.analysis;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.util.Version;
+
+import java.io.StringReader;
+import java.io.IOException;
+import java.util.Iterator;
+import java.util.Set;
+import java.util.HashSet;
+
+public class TestStopAnalyzer extends BaseTokenStreamTestCase {
+  
+  private StopAnalyzer stop = new StopAnalyzer(TEST_VERSION_CURRENT);
+  private Set<Object> inValidTokens = new HashSet<Object>();
+
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    
+    Iterator<?> it = StopAnalyzer.ENGLISH_STOP_WORDS_SET.iterator();
+    while(it.hasNext()) {
+      inValidTokens.add(it.next());
+    }
+  }
+
+  public void testDefaults() throws IOException {
+    assertTrue(stop != null);
+    StringReader reader = new StringReader("This is a test of the english stop analyzer");
+    TokenStream stream = stop.tokenStream("test", reader);
+    assertTrue(stream != null);
+    CharTermAttribute termAtt = stream.getAttribute(CharTermAttribute.class);
+    
+    while (stream.incrementToken()) {
+      assertFalse(inValidTokens.contains(termAtt.toString()));
+    }
+  }
+
+  public void testStopList() throws IOException {
+    Set<Object> stopWordsSet = new HashSet<Object>();
+    stopWordsSet.add("good");
+    stopWordsSet.add("test");
+    stopWordsSet.add("analyzer");
+    StopAnalyzer newStop = new StopAnalyzer(Version.LUCENE_24, stopWordsSet);
+    StringReader reader = new StringReader("This is a good test of the english stop analyzer");
+    TokenStream stream = newStop.tokenStream("test", reader);
+    assertNotNull(stream);
+    CharTermAttribute termAtt = stream.getAttribute(CharTermAttribute.class);
+    PositionIncrementAttribute posIncrAtt = stream.addAttribute(PositionIncrementAttribute.class);
+    
+    while (stream.incrementToken()) {
+      String text = termAtt.toString();
+      assertFalse(stopWordsSet.contains(text));
+      assertEquals(1,posIncrAtt.getPositionIncrement()); // in 2.4 the stop filter does not apply position increments.
+    }
+  }
+
+  public void testStopListPositions() throws IOException {
+    Set<Object> stopWordsSet = new HashSet<Object>();
+    stopWordsSet.add("good");
+    stopWordsSet.add("test");
+    stopWordsSet.add("analyzer");
+    StopAnalyzer newStop = new StopAnalyzer(TEST_VERSION_CURRENT, stopWordsSet);
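+    // expected increments: 1 plus the number of stop words removed immediately before each surviving token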
+    StringReader reader = new StringReader("This is a good test of the english stop analyzer with positions");
+    int expectedIncr[] =                  { 1,   1, 1,          3, 1,  1,      1,            2,   1};
+    TokenStream stream = newStop.tokenStream("test", reader);
+    assertNotNull(stream);
+    int i = 0;
+    CharTermAttribute termAtt = stream.getAttribute(CharTermAttribute.class);
+    PositionIncrementAttribute posIncrAtt = stream.addAttribute(PositionIncrementAttribute.class);
+
+    while (stream.incrementToken()) {
+      String text = termAtt.toString();
+      assertFalse(stopWordsSet.contains(text));
+      assertEquals(expectedIncr[i++],posIncrAtt.getPositionIncrement());
+    }
+  }
+
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/analysis/TestStopFilter.java b/lucene/backwards/src/test/org/apache/lucene/analysis/TestStopFilter.java
new file mode 100644
index 0000000..2b5865e
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/analysis/TestStopFilter.java
@@ -0,0 +1,131 @@
+package org.apache.lucene.analysis;
+
+/**
+ * Copyright 2005 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.util.English;
+import org.apache.lucene.util.Version;
+
+import java.io.IOException;
+import java.io.StringReader;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Set;
+import java.util.HashSet;
+
+import org.apache.lucene.analysis.BaseTokenStreamTestCase;
+import org.apache.lucene.analysis.MockTokenizer;
+import org.apache.lucene.analysis.TokenStream;
+
+public class TestStopFilter extends BaseTokenStreamTestCase {
+  
+  // other StopFilter functionality is already tested by TestStopAnalyzer
+
+  public void testExactCase() throws IOException {
+    StringReader reader = new StringReader("Now is The Time");
+    Set<String> stopWords = new HashSet<String>(Arrays.asList("is", "the", "Time"));
+    TokenStream stream = new StopFilter(TEST_VERSION_CURRENT, new MockTokenizer(reader, MockTokenizer.WHITESPACE, false), stopWords, false);
+    assertTokenStreamContents(stream, new String[] { "Now", "The" });
+  }
+
+  public void testIgnoreCase() throws IOException {
+    StringReader reader = new StringReader("Now is The Time");
+    Set<Object> stopWords = new HashSet<Object>(Arrays.asList( "is", "the", "Time" ));
+    TokenStream stream = new StopFilter(TEST_VERSION_CURRENT, new MockTokenizer(reader, MockTokenizer.WHITESPACE, false), stopWords, true);
+    assertTokenStreamContents(stream, new String[] { "Now" });
+  }
+
+  public void testStopFilt() throws IOException {
+    StringReader reader = new StringReader("Now is The Time");
+    String[] stopWords = new String[] { "is", "the", "Time" };
+    Set<Object> stopSet = StopFilter.makeStopSet(TEST_VERSION_CURRENT, stopWords);
+    TokenStream stream = new StopFilter(TEST_VERSION_CURRENT, new MockTokenizer(reader, MockTokenizer.WHITESPACE, false), stopSet);
+    assertTokenStreamContents(stream, new String[] { "Now", "The" });
+  }
+
+  /**
+   * Test position increments applied by StopFilter, with and without position increments enabled.
+   */
+  public void testStopPositons() throws IOException {
+    StringBuilder sb = new StringBuilder();
+    ArrayList<String> a = new ArrayList<String>();
+    for (int i=0; i<20; i++) {
+      String w = English.intToEnglish(i).trim();
+      sb.append(w).append(" ");
+      if (i%3 != 0) a.add(w);
+    }
+    log(sb.toString());
+    String stopWords[] = a.toArray(new String[0]);
+    for (int i=0; i<a.size(); i++) log("Stop: "+stopWords[i]);
+    Set<Object> stopSet = StopFilter.makeStopSet(TEST_VERSION_CURRENT, stopWords);
+    // with increments
+    StringReader reader = new StringReader(sb.toString());
+    StopFilter stpf = new StopFilter(Version.LUCENE_24, new MockTokenizer(reader, MockTokenizer.WHITESPACE, false), stopSet);
+    doTestStopPositons(stpf,true);
+    // without increments
+    reader = new StringReader(sb.toString());
+    stpf = new StopFilter(TEST_VERSION_CURRENT, new MockTokenizer(reader, MockTokenizer.WHITESPACE, false), stopSet);
+    doTestStopPositons(stpf,false);
+    // with increments, concatenating two stop filters
+    ArrayList<String> a0 = new ArrayList<String>();
+    ArrayList<String> a1 = new ArrayList<String>();
+    for (int i=0; i<a.size(); i++) {
+      if (i%2==0) { 
+        a0.add(a.get(i));
+      } else {
+        a1.add(a.get(i));
+      }
+    }
+    String stopWords0[] =  a0.toArray(new String[0]);
+    for (int i=0; i<a0.size(); i++) log("Stop0: "+stopWords0[i]);
+    String stopWords1[] =  a1.toArray(new String[0]);
+    for (int i=0; i<a1.size(); i++) log("Stop1: "+stopWords1[i]);
+    Set<Object> stopSet0 = StopFilter.makeStopSet(TEST_VERSION_CURRENT, stopWords0);
+    Set<Object> stopSet1 = StopFilter.makeStopSet(TEST_VERSION_CURRENT, stopWords1);
+    reader = new StringReader(sb.toString());
+    StopFilter stpf0 = new StopFilter(TEST_VERSION_CURRENT, new MockTokenizer(reader, MockTokenizer.WHITESPACE, false), stopSet0); // first part of the set
+    stpf0.setEnablePositionIncrements(true);
+    StopFilter stpf01 = new StopFilter(TEST_VERSION_CURRENT, stpf0, stopSet1); // two stop filters concatenated!
+    doTestStopPositons(stpf01,true);
+  }
+  
+  private void doTestStopPositons(StopFilter stpf, boolean enableIcrements) throws IOException {
+    log("---> test with enable-increments-"+(enableIcrements?"enabled":"disabled"));
+    stpf.setEnablePositionIncrements(enableIcrements);
+    CharTermAttribute termAtt = stpf.getAttribute(CharTermAttribute.class);
+    PositionIncrementAttribute posIncrAtt = stpf.getAttribute(PositionIncrementAttribute.class);
+    stpf.reset();
+    for (int i=0; i<20; i+=3) {
+      assertTrue(stpf.incrementToken());
+      log("Token "+i+": "+stpf);
+      String w = English.intToEnglish(i).trim();
+      assertEquals("expecting token "+i+" to be "+w,w,termAtt.toString());
+      assertEquals("all but first token must have position increment of 3",enableIcrements?(i==0?1:3):1,posIncrAtt.getPositionIncrement());
+    }
+    assertFalse(stpf.incrementToken());
+    stpf.end();
+    stpf.close();
+  }
+  
+  // print debug info depending on VERBOSE
+  private static void log(String s) {
+    if (VERBOSE) {
+      System.out.println(s);
+    }
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/analysis/TestTeeSinkTokenFilter.java b/lucene/backwards/src/test/org/apache/lucene/analysis/TestTeeSinkTokenFilter.java
new file mode 100644
index 0000000..14b766e
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/analysis/TestTeeSinkTokenFilter.java
@@ -0,0 +1,227 @@
+package org.apache.lucene.analysis;
+
+/**
+ * Copyright 2004 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.analysis.standard.StandardFilter;
+import org.apache.lucene.analysis.standard.StandardTokenizer;
+import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.util.AttributeSource;
+import org.apache.lucene.util.English;
+import java.io.IOException;
+import java.io.StringReader;
+
+
+/**
+ * Tests for TeeSinkTokenFilter.
+ */
+public class TestTeeSinkTokenFilter extends BaseTokenStreamTestCase {
+  protected StringBuilder buffer1;
+  protected StringBuilder buffer2;
+  protected String[] tokens1;
+  protected String[] tokens2;
+
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    tokens1 = new String[]{"The", "quick", "Burgundy", "Fox", "jumped", "over", "the", "lazy", "Red", "Dogs"};
+    tokens2 = new String[]{"The", "Lazy", "Dogs", "should", "stay", "on", "the", "porch"};
+    buffer1 = new StringBuilder();
+
+    for (int i = 0; i < tokens1.length; i++) {
+      buffer1.append(tokens1[i]).append(' ');
+    }
+    buffer2 = new StringBuilder();
+    for (int i = 0; i < tokens2.length; i++) {
+      buffer2.append(tokens2[i]).append(' ');
+    }
+  }
+
+  static final TeeSinkTokenFilter.SinkFilter theFilter = new TeeSinkTokenFilter.SinkFilter() {
+    @Override
+    public boolean accept(AttributeSource a) {
+      CharTermAttribute termAtt = a.getAttribute(CharTermAttribute.class);
+      return termAtt.toString().equalsIgnoreCase("The");
+    }
+  };
+
+  static final TeeSinkTokenFilter.SinkFilter dogFilter = new TeeSinkTokenFilter.SinkFilter() {
+    @Override
+    public boolean accept(AttributeSource a) {
+      CharTermAttribute termAtt = a.getAttribute(CharTermAttribute.class);
+      return termAtt.toString().equalsIgnoreCase("Dogs");
+    }
+  };
+
+  public void testGeneral() throws IOException {
+    final TeeSinkTokenFilter source = new TeeSinkTokenFilter(new MockTokenizer(new StringReader(buffer1.toString()), MockTokenizer.WHITESPACE, false));
+    final TokenStream sink1 = source.newSinkTokenStream();
+    final TokenStream sink2 = source.newSinkTokenStream(theFilter);
+    
+    source.addAttribute(CheckClearAttributesAttribute.class);
+    sink1.addAttribute(CheckClearAttributesAttribute.class);
+    sink2.addAttribute(CheckClearAttributesAttribute.class);
+    
+    assertTokenStreamContents(source, tokens1);
+    assertTokenStreamContents(sink1, tokens1);
+    assertTokenStreamContents(sink2, new String[]{"The", "the"});
+  }
+
+  public void testMultipleSources() throws Exception {
+    final TeeSinkTokenFilter tee1 = new TeeSinkTokenFilter(new MockTokenizer(new StringReader(buffer1.toString()), MockTokenizer.WHITESPACE, false));
+    final TeeSinkTokenFilter.SinkTokenStream dogDetector = tee1.newSinkTokenStream(dogFilter);
+    final TeeSinkTokenFilter.SinkTokenStream theDetector = tee1.newSinkTokenStream(theFilter);
+    tee1.reset();
+    final TokenStream source1 = new CachingTokenFilter(tee1);
+    
+    tee1.addAttribute(CheckClearAttributesAttribute.class);
+    dogDetector.addAttribute(CheckClearAttributesAttribute.class);
+    theDetector.addAttribute(CheckClearAttributesAttribute.class);
+
+    final TeeSinkTokenFilter tee2 = new TeeSinkTokenFilter(new MockTokenizer(new StringReader(buffer2.toString()), MockTokenizer.WHITESPACE, false));
+    tee2.addSinkTokenStream(dogDetector);
+    tee2.addSinkTokenStream(theDetector);
+    final TokenStream source2 = tee2;
+
+    assertTokenStreamContents(source1, tokens1);
+    assertTokenStreamContents(source2, tokens2);
+
+    assertTokenStreamContents(theDetector, new String[]{"The", "the", "The", "the"});
+    assertTokenStreamContents(dogDetector, new String[]{"Dogs", "Dogs"});
+    
+    source1.reset();
+    TokenStream lowerCasing = new LowerCaseFilter(TEST_VERSION_CURRENT, source1);
+    String[] lowerCaseTokens = new String[tokens1.length];
+    for (int i = 0; i < tokens1.length; i++)
+      lowerCaseTokens[i] = tokens1[i].toLowerCase();
+    assertTokenStreamContents(lowerCasing, lowerCaseTokens);
+  }
+
+  /**
+   * Not an explicit test, just useful to print out some info on performance
+   *
+   * @throws Exception
+   */
+  public void performance() throws Exception {
+    int[] tokCount = {100, 500, 1000, 2000, 5000, 10000};
+    int[] modCounts = {1, 2, 5, 10, 20, 50, 100, 200, 500};
+    for (int k = 0; k < tokCount.length; k++) {
+      StringBuilder buffer = new StringBuilder();
+      System.out.println("-----Tokens: " + tokCount[k] + "-----");
+      for (int i = 0; i < tokCount[k]; i++) {
+        buffer.append(English.intToEnglish(i).toUpperCase()).append(' ');
+      }
+      //make sure we produce the same tokens
+      TeeSinkTokenFilter teeStream = new TeeSinkTokenFilter(new StandardFilter(new StandardTokenizer(TEST_VERSION_CURRENT, new StringReader(buffer.toString()))));
+      TokenStream sink = teeStream.newSinkTokenStream(new ModuloSinkFilter(100));
+      teeStream.consumeAllTokens();
+      TokenStream stream = new ModuloTokenFilter(new StandardFilter(new StandardTokenizer(TEST_VERSION_CURRENT, new StringReader(buffer.toString()))), 100);
+      CharTermAttribute tfTok = stream.addAttribute(CharTermAttribute.class);
+      CharTermAttribute sinkTok = sink.addAttribute(CharTermAttribute.class);
+      for (int i=0; stream.incrementToken(); i++) {
+        assertTrue(sink.incrementToken());
+        assertTrue(tfTok + " is not equal to " + sinkTok + " at token: " + i, tfTok.equals(sinkTok) == true);
+      }
+      
+      //simulate two fields, each being analyzed once, for 20 documents
+      for (int j = 0; j < modCounts.length; j++) {
+        int tfPos = 0;
+        long start = System.currentTimeMillis();
+        for (int i = 0; i < 20; i++) {
+          stream = new StandardFilter(new StandardTokenizer(TEST_VERSION_CURRENT, new StringReader(buffer.toString())));
+          PositionIncrementAttribute posIncrAtt = stream.getAttribute(PositionIncrementAttribute.class);
+          while (stream.incrementToken()) {
+            tfPos += posIncrAtt.getPositionIncrement();
+          }
+          stream = new ModuloTokenFilter(new StandardFilter(new StandardTokenizer(TEST_VERSION_CURRENT, new StringReader(buffer.toString()))), modCounts[j]);
+          posIncrAtt = stream.getAttribute(PositionIncrementAttribute.class);
+          while (stream.incrementToken()) {
+            tfPos += posIncrAtt.getPositionIncrement();
+          }
+        }
+        long finish = System.currentTimeMillis();
+        System.out.println("ModCount: " + modCounts[j] + " Two fields took " + (finish - start) + " ms");
+        int sinkPos = 0;
+        //simulate one field with one sink
+        start = System.currentTimeMillis();
+        for (int i = 0; i < 20; i++) {
+          teeStream = new TeeSinkTokenFilter(new StandardFilter(new StandardTokenizer(TEST_VERSION_CURRENT, new StringReader(buffer.toString()))));
+          sink = teeStream.newSinkTokenStream(new ModuloSinkFilter(modCounts[j]));
+          PositionIncrementAttribute posIncrAtt = teeStream.getAttribute(PositionIncrementAttribute.class);
+          while (teeStream.incrementToken()) {
+            sinkPos += posIncrAtt.getPositionIncrement();
+          }
+          //System.out.println("Modulo--------");
+          posIncrAtt = sink.getAttribute(PositionIncrementAttribute.class);
+          while (sink.incrementToken()) {
+            sinkPos += posIncrAtt.getPositionIncrement();
+          }
+        }
+        finish = System.currentTimeMillis();
+        System.out.println("ModCount: " + modCounts[j] + " Tee fields took " + (finish - start) + " ms");
+        assertTrue(sinkPos + " does not equal: " + tfPos, sinkPos == tfPos);
+
+      }
+      System.out.println("- End Tokens: " + tokCount[k] + "-----");
+    }
+
+  }
+
+
+  class ModuloTokenFilter extends TokenFilter {
+
+    int modCount;
+
+    ModuloTokenFilter(TokenStream input, int mc) {
+      super(input);
+      modCount = mc;
+    }
+
+    int count = 0;
+
+    // pass through only every modCount-th token
+    @Override
+    public boolean incrementToken() throws IOException {
+      boolean hasNext;
+      for (hasNext = input.incrementToken();
+           hasNext && count % modCount != 0;
+           hasNext = input.incrementToken()) {
+        count++;
+      }
+      count++;
+      return hasNext;
+    }
+  }
+
+  class ModuloSinkFilter extends TeeSinkTokenFilter.SinkFilter {
+    int count = 0;
+    int modCount;
+
+    ModuloSinkFilter(int mc) {
+      modCount = mc;
+    }
+
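+    // accept only every modCount-th token that reaches this sink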
+    @Override
+    public boolean accept(AttributeSource a) {
+      boolean b = (a != null && count % modCount == 0);
+      count++;
+      return b;
+    }
+
+  }
+}
+
diff --git a/lucene/backwards/src/test/org/apache/lucene/analysis/TestToken.java b/lucene/backwards/src/test/org/apache/lucene/analysis/TestToken.java
new file mode 100644
index 0000000..caaaa29
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/analysis/TestToken.java
@@ -0,0 +1,273 @@
+package org.apache.lucene.analysis;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.index.Payload;
+import org.apache.lucene.analysis.tokenattributes.*;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.Attribute;
+import org.apache.lucene.util.AttributeImpl;
+import org.apache.lucene.util._TestUtil;
+
+import java.io.StringReader;
+import java.util.HashMap;
+
+public class TestToken extends LuceneTestCase {
+
+  public void testCtor() throws Exception {
+    Token t = new Token();
+    char[] content = "hello".toCharArray();
+    t.setTermBuffer(content, 0, content.length);
+    assertNotSame(t.termBuffer(), content);
+    assertEquals(0, t.startOffset());
+    assertEquals(0, t.endOffset());
+    assertEquals("hello", t.term());
+    assertEquals("word", t.type());
+    assertEquals(0, t.getFlags());
+
+    t = new Token(6, 22);
+    t.setTermBuffer(content, 0, content.length);
+    assertEquals("hello", t.term());
+    assertEquals("hello", t.toString());
+    assertEquals(6, t.startOffset());
+    assertEquals(22, t.endOffset());
+    assertEquals("word", t.type());
+    assertEquals(0, t.getFlags());
+
+    t = new Token(6, 22, 7);
+    t.setTermBuffer(content, 0, content.length);
+    assertEquals("hello", t.term());
+    assertEquals("hello", t.toString());
+    assertEquals(6, t.startOffset());
+    assertEquals(22, t.endOffset());
+    assertEquals("word", t.type());
+    assertEquals(7, t.getFlags());
+
+    t = new Token(6, 22, "junk");
+    t.setTermBuffer(content, 0, content.length);
+    assertEquals("hello", t.term());
+    assertEquals("hello", t.toString());
+    assertEquals(6, t.startOffset());
+    assertEquals(22, t.endOffset());
+    assertEquals("junk", t.type());
+    assertEquals(0, t.getFlags());
+  }
+
+  public void testResize() {
+    Token t = new Token();
+    char[] content = "hello".toCharArray();
+    t.setTermBuffer(content, 0, content.length);
+    for (int i = 0; i < 2000; i++)
+    {
+      t.resizeTermBuffer(i);
+      assertTrue(i <= t.termBuffer().length);
+      assertEquals("hello", t.term());
+    }
+  }
+
+  public void testGrow() {
+    Token t = new Token();
+    StringBuilder buf = new StringBuilder("ab");
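+    // repeatedly double the content and re-set it to exercise term buffer growth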
+    for (int i = 0; i < 20; i++)
+    {
+      char[] content = buf.toString().toCharArray();
+      t.setTermBuffer(content, 0, content.length);
+      assertEquals(buf.length(), t.termLength());
+      assertEquals(buf.toString(), t.term());
+      buf.append(buf.toString());
+    }
+    assertEquals(1048576, t.termLength());
+
+    // now as a string, first variant
+    t = new Token();
+    buf = new StringBuilder("ab");
+    for (int i = 0; i < 20; i++)
+    {
+      String content = buf.toString();
+      t.setTermBuffer(content, 0, content.length());
+      assertEquals(content.length(), t.termLength());
+      assertEquals(content, t.term());
+      buf.append(content);
+    }
+    assertEquals(1048576, t.termLength());
+
+    // now as a string, second variant
+    t = new Token();
+    buf = new StringBuilder("ab");
+    for (int i = 0; i < 20; i++)
+    {
+      String content = buf.toString();
+      t.setTermBuffer(content);
+      assertEquals(content.length(), t.termLength());
+      assertEquals(content, t.term());
+      buf.append(content);
+    }
+    assertEquals(1048576, t.termLength());
+
+    // Test for slow growth to a long term
+    t = new Token();
+    buf = new StringBuilder("a");
+    for (int i = 0; i < 20000; i++)
+    {
+      String content = buf.toString();
+      t.setTermBuffer(content);
+      assertEquals(content.length(), t.termLength());
+      assertEquals(content, t.term());
+      buf.append("a");
+    }
+    assertEquals(20000, t.termLength());
+
+    // Test for slow growth to a long term
+    t = new Token();
+    buf = new StringBuilder("a");
+    for (int i = 0; i < 20000; i++)
+    {
+      String content = buf.toString();
+      t.setTermBuffer(content);
+      assertEquals(content.length(), t.termLength());
+      assertEquals(content, t.term());
+      buf.append("a");
+    }
+    assertEquals(20000, t.termLength());
+  }
+
+  public void testToString() throws Exception {
+    char[] b = {'a', 'l', 'o', 'h', 'a'};
+    Token t = new Token("", 0, 5);
+    t.setTermBuffer(b, 0, 5);
+    assertEquals("aloha", t.toString());
+
+    t.setTermBuffer("hi there");
+    assertEquals("hi there", t.toString());
+  }
+
+  public void testTermBufferEquals() throws Exception {
+    Token t1a = new Token();
+    char[] content1a = "hello".toCharArray();
+    t1a.setTermBuffer(content1a, 0, 5);
+    Token t1b = new Token();
+    char[] content1b = "hello".toCharArray();
+    t1b.setTermBuffer(content1b, 0, 5);
+    Token t2 = new Token();
+    char[] content2 = "hello2".toCharArray();
+    t2.setTermBuffer(content2, 0, 6);
+    assertTrue(t1a.equals(t1b));
+    assertFalse(t1a.equals(t2));
+    assertFalse(t2.equals(t1b));
+  }
+  
+  public void testMixedStringArray() throws Exception {
+    Token t = new Token("hello", 0, 5);
+    assertEquals(t.termLength(), 5);
+    assertEquals(t.term(), "hello");
+    t.setTermBuffer("hello2");
+    assertEquals(t.termLength(), 6);
+    assertEquals(t.term(), "hello2");
+    t.setTermBuffer("hello3".toCharArray(), 0, 6);
+    assertEquals(t.term(), "hello3");
+
+    char[] buffer = t.termBuffer();
+    buffer[1] = 'o';
+    assertEquals(t.term(), "hollo3");
+  }
+  
+  public void testClone() throws Exception {
+    Token t = new Token(0, 5);
+    char[] content = "hello".toCharArray();
+    t.setTermBuffer(content, 0, 5);
+    char[] buf = t.termBuffer();
+    Token copy = (Token) TestSimpleAttributeImpls.assertCloneIsEqual(t);
+    assertEquals(t.term(), copy.term());
+    assertNotSame(buf, copy.termBuffer());
+
+    Payload pl = new Payload(new byte[]{1,2,3,4});
+    t.setPayload(pl);
+    copy = (Token) TestSimpleAttributeImpls.assertCloneIsEqual(t);
+    assertEquals(pl, copy.getPayload());
+    assertNotSame(pl, copy.getPayload());
+  }
+  
+  public void testCopyTo() throws Exception {
+    Token t = new Token();
+    Token copy = (Token) TestSimpleAttributeImpls.assertCopyIsEqual(t);
+    assertEquals("", t.term());
+    assertEquals("", copy.term());
+
+    t = new Token(0, 5);
+    char[] content = "hello".toCharArray();
+    t.setTermBuffer(content, 0, 5);
+    char[] buf = t.termBuffer();
+    copy = (Token) TestSimpleAttributeImpls.assertCopyIsEqual(t);
+    assertEquals(t.term(), copy.term());
+    assertNotSame(buf, copy.termBuffer());
+
+    Payload pl = new Payload(new byte[]{1,2,3,4});
+    t.setPayload(pl);
+    copy = (Token) TestSimpleAttributeImpls.assertCopyIsEqual(t);
+    assertEquals(pl, copy.getPayload());
+    assertNotSame(pl, copy.getPayload());
+  }
+  
+  public interface SenselessAttribute extends Attribute {}
+  
+  public static final class SenselessAttributeImpl extends AttributeImpl implements SenselessAttribute {
+    @Override
+    public void copyTo(AttributeImpl target) {}
+    @Override
+    public void clear() {}
+    @Override
+    public boolean equals(Object o) { return (o instanceof SenselessAttributeImpl); }
+    @Override
+    public int hashCode() { return 0; }
+  }
+
+  public void testTokenAttributeFactory() throws Exception {
+    TokenStream ts = new WhitespaceTokenizer(Token.TOKEN_ATTRIBUTE_FACTORY, new StringReader("foo bar"));
+    
+    assertTrue("SenselessAttribute is not implemented by SenselessAttributeImpl",
+      ts.addAttribute(SenselessAttribute.class) instanceof SenselessAttributeImpl);
+    
+    assertTrue("CharTermAttribute is not implemented by Token",
+      ts.addAttribute(CharTermAttribute.class) instanceof Token);
+    assertTrue("OffsetAttribute is not implemented by Token",
+      ts.addAttribute(OffsetAttribute.class) instanceof Token);
+    assertTrue("FlagsAttribute is not implemented by Token",
+      ts.addAttribute(FlagsAttribute.class) instanceof Token);
+    assertTrue("PayloadAttribute is not implemented by Token",
+      ts.addAttribute(PayloadAttribute.class) instanceof Token);
+    assertTrue("PositionIncrementAttribute is not implemented by Token", 
+      ts.addAttribute(PositionIncrementAttribute.class) instanceof Token);
+    assertTrue("TypeAttribute is not implemented by Token",
+      ts.addAttribute(TypeAttribute.class) instanceof Token);
+  }
+
+  public void testAttributeReflection() throws Exception {
+    Token t = new Token("foobar", 6, 22, 8);
+    _TestUtil.assertAttributeReflection(t,
+      new HashMap<String,Object>() {{
+        put(CharTermAttribute.class.getName() + "#term", "foobar");
+        put(OffsetAttribute.class.getName() + "#startOffset", 6);
+        put(OffsetAttribute.class.getName() + "#endOffset", 22);
+        put(PositionIncrementAttribute.class.getName() + "#positionIncrement", 1);
+        put(PayloadAttribute.class.getName() + "#payload", null);
+        put(TypeAttribute.class.getName() + "#type", TypeAttribute.DEFAULT_TYPE);
+        put(FlagsAttribute.class.getName() + "#flags", 8);
+      }});
+  }
+
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/analysis/TestUAX29URLEmailTokenizer.java b/lucene/backwards/src/test/org/apache/lucene/analysis/TestUAX29URLEmailTokenizer.java
new file mode 100644
index 0000000..01a104c
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/analysis/TestUAX29URLEmailTokenizer.java
@@ -0,0 +1,447 @@
+package org.apache.lucene.analysis;
+
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.analysis.standard.UAX29URLEmailTokenizer;
+import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
+import org.apache.lucene.util.Version;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.Reader;
+import java.io.StringReader;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class TestUAX29URLEmailTokenizer extends BaseTokenStreamTestCase {
+  
+  public void testHugeDoc() throws IOException {
+    StringBuilder sb = new StringBuilder();
+    char whitespace[] = new char[4094];
+    Arrays.fill(whitespace, ' ');
+    sb.append(whitespace);
+    sb.append("testing 1234");
+    String input = sb.toString();
+    UAX29URLEmailTokenizer tokenizer = new UAX29URLEmailTokenizer(TEST_VERSION_CURRENT, new StringReader(input));
+    BaseTokenStreamTestCase.assertTokenStreamContents(tokenizer, new String[] { "testing", "1234" });
+  }
+
+  private Analyzer a = new ReusableAnalyzerBase() {
+    @Override
+    protected TokenStreamComponents createComponents
+      (String fieldName, Reader reader) {
+
+      Tokenizer tokenizer = new UAX29URLEmailTokenizer(TEST_VERSION_CURRENT, reader);
+      return new TokenStreamComponents(tokenizer);
+    }
+  };
+
+
+  /** Passes through tokens with type "<URL>" and blocks all other types. */
+  private class URLFilter extends TokenFilter {
+    private final TypeAttribute typeAtt = addAttribute(TypeAttribute.class);
+    public URLFilter(TokenStream in) {
+      super(in);
+    }
+    @Override
+    public final boolean incrementToken() throws java.io.IOException {
+      boolean isTokenAvailable = false;
+      while (input.incrementToken()) {
+        if (typeAtt.type() == UAX29URLEmailTokenizer.TOKEN_TYPES[UAX29URLEmailTokenizer.URL]) {
+          isTokenAvailable = true;
+          break;
+        }
+      }
+      return isTokenAvailable;
+    }
+  }
+  
+  /** Passes through tokens with type "<EMAIL>" and blocks all other types. */
+  private class EmailFilter extends TokenFilter {
+    private final TypeAttribute typeAtt = addAttribute(TypeAttribute.class);
+    public EmailFilter(TokenStream in) {
+      super(in);
+    }
+    @Override
+    public final boolean incrementToken() throws java.io.IOException {
+      boolean isTokenAvailable = false;
+      while (input.incrementToken()) {
+        if (typeAtt.type() == UAX29URLEmailTokenizer.TOKEN_TYPES[UAX29URLEmailTokenizer.EMAIL]) {
+          isTokenAvailable = true;
+          break;
+        }
+      }
+      return isTokenAvailable;
+    }
+  }
+
+  private Analyzer urlAnalyzer = new ReusableAnalyzerBase() {
+    @Override
+    protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
+      UAX29URLEmailTokenizer tokenizer = new UAX29URLEmailTokenizer(TEST_VERSION_CURRENT, reader);
+      tokenizer.setMaxTokenLength(Integer.MAX_VALUE);  // Tokenize arbitrary length URLs
+      TokenFilter filter = new URLFilter(tokenizer);
+      return new TokenStreamComponents(tokenizer, filter);
+    }
+  };
+
+  private Analyzer emailAnalyzer = new ReusableAnalyzerBase() {
+    @Override
+    protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
+      UAX29URLEmailTokenizer tokenizer = new UAX29URLEmailTokenizer(TEST_VERSION_CURRENT, reader);
+      TokenFilter filter = new EmailFilter(tokenizer);
+      return new TokenStreamComponents(tokenizer, filter);
+    }
+  };
+  
+  
+  public void testArmenian() throws Exception {
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "Վիքիպեդիայի 13 միլիոն հոդվածները (4,600` հայերեն վիքիպեդիայում) գրվել են կամավորների կողմից ու համարյա բոլոր հոդվածները կարող է խմբագրել ցանկաց մարդ ով կարող է բացել Վիքիպեդիայի կայքը։",
+        new String[] { "Վիքիպեդիայի", "13", "միլիոն", "հոդվածները", "4,600", "հայերեն", "վիքիպեդիայում", "գրվել", "են", "կամավորների", "կողմից", 
+        "ու", "համարյա", "բոլոր", "հոդվածները", "կարող", "է", "խմբագրել", "ցանկաց", "մարդ", "ով", "կարող", "է", "բացել", "Վիքիպեդիայի", "կայքը" } );
+  }
+  
+  public void testAmharic() throws Exception {
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "ዊኪፔድያ የባለ ብዙ ቋንቋ የተሟላ ትክክለኛና ነጻ መዝገበ ዕውቀት (ኢንሳይክሎፒዲያ) ነው። ማንኛውም",
+        new String[] { "ዊኪፔድያ", "የባለ", "ብዙ", "ቋንቋ", "የተሟላ", "ትክክለኛና", "ነጻ", "መዝገበ", "ዕውቀት", "ኢንሳይክሎፒዲያ", "ነው", "ማንኛውም" } );
+  }
+  
+  public void testArabic() throws Exception {
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "الفيلم الوثائقي الأول عن ويكيبيديا يسمى \"الحقيقة بالأرقام: قصة ويكيبيديا\" (بالإنجليزية: Truth in Numbers: The Wikipedia Story)، سيتم إطلاقه في 2008.",
+        new String[] { "الفيلم", "الوثائقي", "الأول", "عن", "ويكيبيديا", "يسمى", "الحقيقة", "بالأرقام", "قصة", "ويكيبيديا",
+        "بالإنجليزية", "Truth", "in", "Numbers", "The", "Wikipedia", "Story", "سيتم", "إطلاقه", "في", "2008" } ); 
+  }
+  
+  public void testAramaic() throws Exception {
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "ܘܝܩܝܦܕܝܐ (ܐܢܓܠܝܐ: Wikipedia) ܗܘ ܐܝܢܣܩܠܘܦܕܝܐ ܚܐܪܬܐ ܕܐܢܛܪܢܛ ܒܠܫܢ̈ܐ ܣܓܝܐ̈ܐ܂ ܫܡܗ ܐܬܐ ܡܢ ܡ̈ܠܬܐ ܕ\"ܘܝܩܝ\" ܘ\"ܐܝܢܣܩܠܘܦܕܝܐ\"܀",
+        new String[] { "ܘܝܩܝܦܕܝܐ", "ܐܢܓܠܝܐ", "Wikipedia", "ܗܘ", "ܐܝܢܣܩܠܘܦܕܝܐ", "ܚܐܪܬܐ", "ܕܐܢܛܪܢܛ", "ܒܠܫܢ̈ܐ", "ܣܓܝܐ̈ܐ", "ܫܡܗ",
+        "ܐܬܐ", "ܡܢ", "ܡ̈ܠܬܐ", "ܕ", "ܘܝܩܝ", "ܘ", "ܐܝܢܣܩܠܘܦܕܝܐ"});
+  }
+  
+  public void testBengali() throws Exception {
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "এই বিশ্বকোষ পরিচালনা করে উইকিমিডিয়া ফাউন্ডেশন (একটি অলাভজনক সংস্থা)। উইকিপিডিয়ার শুরু ১৫ জানুয়ারি, ২০০১ সালে। এখন পর্যন্ত ২০০টিরও বেশী ভাষায় উইকিপিডিয়া রয়েছে।",
+        new String[] { "এই", "বিশ্বকোষ", "পরিচালনা", "করে", "উইকিমিডিয়া", "ফাউন্ডেশন", "একটি", "অলাভজনক", "সংস্থা", "উইকিপিডিয়ার",
+        "শুরু", "১৫", "জানুয়ারি", "২০০১", "সালে", "এখন", "পর্যন্ত", "২০০টিরও", "বেশী", "ভাষায়", "উইকিপিডিয়া", "রয়েছে" });
+  }
+  
+  public void testFarsi() throws Exception {
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "ویکی پدیای انگلیسی در تاریخ ۲۵ دی ۱۳۷۹ به صورت مکملی برای دانشنامهٔ تخصصی نوپدیا نوشته شد.",
+        new String[] { "ویکی", "پدیای", "انگلیسی", "در", "تاریخ", "۲۵", "دی", "۱۳۷۹", "به", "صورت", "مکملی",
+        "برای", "دانشنامهٔ", "تخصصی", "نوپدیا", "نوشته", "شد" });
+  }
+  
+  public void testGreek() throws Exception {
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "Γράφεται σε συνεργασία από εθελοντές με το λογισμικό wiki, κάτι που σημαίνει ότι άρθρα μπορεί να προστεθούν ή να αλλάξουν από τον καθένα.",
+        new String[] { "Γράφεται", "σε", "συνεργασία", "από", "εθελοντές", "με", "το", "λογισμικό", "wiki", "κάτι", "που",
+        "σημαίνει", "ότι", "άρθρα", "μπορεί", "να", "προστεθούν", "ή", "να", "αλλάξουν", "από", "τον", "καθένα" });
+  }
+
+  public void testThai() throws Exception {
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "การที่ได้ต้องแสดงว่างานดี. แล้วเธอจะไปไหน? ๑๒๓๔",
+        new String[] { "การที่ได้ต้องแสดงว่างานดี", "แล้วเธอจะไปไหน", "๑๒๓๔" });
+  }
+  
+  public void testLao() throws Exception {
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "ສາທາລະນະລັດ ປະຊາທິປະໄຕ ປະຊາຊົນລາວ", 
+        new String[] { "ສາທາລະນະລັດ", "ປະຊາທິປະໄຕ", "ປະຊາຊົນລາວ" });
+  }
+  
+  public void testTibetan() throws Exception {
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "སྣོན་མཛོད་དང་ལས་འདིས་བོད་ཡིག་མི་ཉམས་གོང་འཕེལ་དུ་གཏོང་བར་ཧ་ཅང་དགེ་མཚན་མཆིས་སོ། །",
+                     new String[] { "སྣོན", "མཛོད", "དང", "ལས", "འདིས", "བོད", "ཡིག", 
+                                    "མི", "ཉམས", "གོང", "འཕེལ", "དུ", "གཏོང", "བར", 
+                                    "ཧ", "ཅང", "དགེ", "མཚན", "མཆིས", "སོ" });
+  }
+  
+  /*
+   * For Chinese, tokenize as individual characters (these can later be combined into bigrams, etc.)
+   */
+  public void testChinese() throws Exception {
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "我是中国人。 1234 Tests ",
+        new String[] { "我", "是", "中", "国", "人", "1234", "Tests"});
+  }
+  
+  public void testEmpty() throws Exception {
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "", new String[] {});
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, ".", new String[] {});
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, " ", new String[] {});
+  }
+  
+  /* Tests for various Jira issues related to this analyzer */
+  
+  public void testLUCENE1545() throws Exception {
+    /*
+     * StandardAnalyzer does not correctly tokenize the combining character U+0364 COMBINING LATIN SMALL LETTER E.
+     * The word "moͤchte" is incorrectly tokenized into "mo" "chte"; the combining character is lost.
+     * The expected result is a single token "moͤchte".
+     */
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "moͤchte", new String[] { "moͤchte" }); 
+  }
+  
+  /* Tests from StandardAnalyzer, just to show behavior is similar */
+  public void testAlphanumericSA() throws Exception {
+    // alphanumeric tokens
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "B2B", new String[]{"B2B"});
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "2B", new String[]{"2B"});
+  }
+
+  public void testDelimitersSA() throws Exception {
+    // other delimiters: "-", "/", ","
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "some-dashed-phrase", new String[]{"some", "dashed", "phrase"});
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "dogs,chase,cats", new String[]{"dogs", "chase", "cats"});
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "ac/dc", new String[]{"ac", "dc"});
+  }
+
+  public void testApostrophesSA() throws Exception {
+    // internal apostrophes: O'Reilly, you're, O'Reilly's
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "O'Reilly", new String[]{"O'Reilly"});
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "you're", new String[]{"you're"});
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "she's", new String[]{"she's"});
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "Jim's", new String[]{"Jim's"});
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "don't", new String[]{"don't"});
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "O'Reilly's", new String[]{"O'Reilly's"});
+  }
+
+  public void testNumericSA() throws Exception {
+    // floating point, serial, model numbers, ip addresses, etc.
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "21.35", new String[]{"21.35"});
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "R2D2 C3PO", new String[]{"R2D2", "C3PO"});
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "216.239.63.104", new String[]{"216.239.63.104"});
+  }
+
+  public void testTextWithNumbersSA() throws Exception {
+    // numbers
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "David has 5000 bones", new String[]{"David", "has", "5000", "bones"});
+  }
+
+  public void testVariousTextSA() throws Exception {
+    // various
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "C embedded developers wanted", new String[]{"C", "embedded", "developers", "wanted"});
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "foo bar FOO BAR", new String[]{"foo", "bar", "FOO", "BAR"});
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "foo      bar .  FOO <> BAR", new String[]{"foo", "bar", "FOO", "BAR"});
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "\"QUOTED\" word", new String[]{"QUOTED", "word"});
+  }
+
+  public void testKoreanSA() throws Exception {
+    // Korean words
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "안녕하세요 한글입니다", new String[]{"안녕하세요", "한글입니다"});
+  }
+  
+  public void testOffsets() throws Exception {
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "David has 5000 bones", 
+        new String[] {"David", "has", "5000", "bones"},
+        new int[] {0, 6, 10, 15},
+        new int[] {5, 9, 14, 20});
+  }
+  
+  public void testTypes() throws Exception {
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "David has 5000 bones", 
+        new String[] {"David", "has", "5000", "bones"},
+        new String[] { "<ALPHANUM>", "<ALPHANUM>", "<NUM>", "<ALPHANUM>" });
+  }
+  
+  public void testWikiURLs() throws Exception {
+    Reader reader = null;
+    String luceneResourcesWikiPage;
+    try {
+      reader = new InputStreamReader(getClass().getResourceAsStream
+        ("LuceneResourcesWikiPage.html"), "UTF-8");
+      StringBuilder builder = new StringBuilder();
+      char[] buffer = new char[1024];
+      int numCharsRead;
+      while (-1 != (numCharsRead = reader.read(buffer))) {
+        builder.append(buffer, 0, numCharsRead);
+      }
+      luceneResourcesWikiPage = builder.toString(); 
+    } finally {
+      if (null != reader) {
+        reader.close();
+      }
+    }
+    assertTrue(null != luceneResourcesWikiPage 
+               && luceneResourcesWikiPage.length() > 0);
+    BufferedReader bufferedReader = null;
+    String[] urls;
+    try {
+      List<String> urlList = new ArrayList<String>();
+      bufferedReader = new BufferedReader(new InputStreamReader
+        (getClass().getResourceAsStream("LuceneResourcesWikiPageURLs.txt"), "UTF-8"));
+      String line;
+      while (null != (line = bufferedReader.readLine())) {
+        line = line.trim();
+        if (line.length() > 0) {
+          urlList.add(line);
+        }
+      }
+      urls = urlList.toArray(new String[urlList.size()]);
+    } finally {
+      if (null != bufferedReader) {
+        bufferedReader.close();
+      }
+    }
+    assertTrue(null != urls && urls.length > 0);
+    BaseTokenStreamTestCase.assertAnalyzesTo
+      (urlAnalyzer, luceneResourcesWikiPage, urls);
+  }
+  
+  public void testEmails() throws Exception {
+    Reader reader = null;
+    String randomTextWithEmails;
+    try {
+      reader = new InputStreamReader(getClass().getResourceAsStream
+        ("random.text.with.email.addresses.txt"), "UTF-8");
+      StringBuilder builder = new StringBuilder();
+      char[] buffer = new char[1024];
+      int numCharsRead;
+      while (-1 != (numCharsRead = reader.read(buffer))) {
+        builder.append(buffer, 0, numCharsRead);
+      }
+      randomTextWithEmails = builder.toString(); 
+    } finally {
+      if (null != reader) {
+        reader.close();
+      }
+    }
+    assertTrue(null != randomTextWithEmails 
+               && randomTextWithEmails.length() > 0);
+    BufferedReader bufferedReader = null;
+    String[] emails;
+    try {
+      List<String> emailList = new ArrayList<String>();
+      bufferedReader = new BufferedReader(new InputStreamReader
+        (getClass().getResourceAsStream
+          ("email.addresses.from.random.text.with.email.addresses.txt"), "UTF-8"));
+      String line;
+      while (null != (line = bufferedReader.readLine())) {
+        line = line.trim();
+        if (line.length() > 0) {
+          emailList.add(line);
+        }
+      }
+      emails = emailList.toArray(new String[emailList.size()]);
+    } finally {
+      if (null != bufferedReader) {
+        bufferedReader.close();
+      }
+    }
+    assertTrue(null != emails && emails.length > 0);
+    BaseTokenStreamTestCase.assertAnalyzesTo
+      (emailAnalyzer, randomTextWithEmails, emails);
+  }
+
+  public void testURLs() throws Exception {
+    Reader reader = null;
+    String randomTextWithURLs;
+    try {
+      reader = new InputStreamReader(getClass().getResourceAsStream
+        ("random.text.with.urls.txt"), "UTF-8");
+      StringBuilder builder = new StringBuilder();
+      char[] buffer = new char[1024];
+      int numCharsRead;
+      while (-1 != (numCharsRead = reader.read(buffer))) {
+        builder.append(buffer, 0, numCharsRead);
+      }
+      randomTextWithURLs = builder.toString(); 
+    } finally {
+      if (null != reader) {
+        reader.close();
+      }
+    }
+    assertTrue(null != randomTextWithURLs 
+               && randomTextWithURLs.length() > 0);
+    BufferedReader bufferedReader = null;
+    String[] urls;
+    try {
+      List<String> urlList = new ArrayList<String>();
+      bufferedReader = new BufferedReader(new InputStreamReader
+        (getClass().getResourceAsStream
+          ("urls.from.random.text.with.urls.txt"), "UTF-8"));
+      String line;
+      while (null != (line = bufferedReader.readLine())) {
+        line = line.trim();
+        if (line.length() > 0) {
+          urlList.add(line);
+        }
+      }
+      urls = urlList.toArray(new String[urlList.size()]);
+    } finally {
+      if (null != bufferedReader) {
+        bufferedReader.close();
+      }
+    }
+    assertTrue(null != urls && urls.length > 0);
+    BaseTokenStreamTestCase.assertAnalyzesTo
+      (urlAnalyzer, randomTextWithURLs, urls);
+  }
+
+  public void testUnicodeWordBreaks() throws Exception {
+    WordBreakTestUnicode_6_0_0 wordBreakTest = new WordBreakTestUnicode_6_0_0();
+    wordBreakTest.test(a);
+  }
+  
+  public void testSupplementary() throws Exception {
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "𩬅艱鍟䇹愯瀛", 
+        new String[] {"𩬅", "艱", "鍟", "䇹", "愯", "瀛"},
+        new String[] { "<IDEOGRAPHIC>", "<IDEOGRAPHIC>", "<IDEOGRAPHIC>", "<IDEOGRAPHIC>", "<IDEOGRAPHIC>", "<IDEOGRAPHIC>" });
+  }
+  
+  public void testKorean() throws Exception {
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "훈민정음",
+        new String[] { "훈민정음" },
+        new String[] { "<HANGUL>" });
+  }
+  
+  public void testJapanese() throws Exception {
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "仮名遣い カタカナ",
+        new String[] { "仮", "名", "遣", "い", "カタカナ" },
+        new String[] { "<IDEOGRAPHIC>", "<IDEOGRAPHIC>", "<IDEOGRAPHIC>", "<HIRAGANA>", "<KATAKANA>" });
+  }
+
+  public void testCombiningMarks() throws Exception {
+    checkOneTerm(a, "ざ", "ざ"); // hiragana
+    checkOneTerm(a, "ザ", "ザ"); // katakana
+    checkOneTerm(a, "壹゙", "壹゙"); // ideographic
+    checkOneTerm(a, "아゙",  "아゙"); // hangul
+  }
+
+  /** @deprecated remove this and the sophisticated backwards layer in 5.0 */
+  @Deprecated
+  public void testCombiningMarksBackwards() throws Exception {
+    Analyzer a = new ReusableAnalyzerBase() {
+      @Override
+      protected TokenStreamComponents createComponents
+        (String fieldName, Reader reader) {
+
+        Tokenizer tokenizer = new UAX29URLEmailTokenizer(reader);
+        return new TokenStreamComponents(tokenizer);
+      }
+    };
+    checkOneTerm(a, "ざ", "さ"); // hiragana Bug
+    checkOneTerm(a, "ザ", "ザ"); // katakana Works
+    checkOneTerm(a, "壹゙", "壹"); // ideographic Bug
+    checkOneTerm(a, "아゙",  "아゙"); // hangul Works
+  }
+
+  /** blast some random strings through the analyzer */
+  public void testRandomStrings() throws Exception {
+    checkRandomData(random, a, 10000*RANDOM_MULTIPLIER);
+  }
+}
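A minimal standalone sketch of driving the tokenizer under test directly, for reference only; Version.LUCENE_34 and the example input/expected type are assumptions, not taken from this patch:

import java.io.StringReader;

import org.apache.lucene.analysis.standard.UAX29URLEmailTokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
import org.apache.lucene.util.Version;

public class UAX29URLEmailTokenizerDemo {
  public static void main(String[] args) throws Exception {
    // Version.LUCENE_34 is an assumption here; the tests above use TEST_VERSION_CURRENT instead.
    UAX29URLEmailTokenizer ts = new UAX29URLEmailTokenizer(
        Version.LUCENE_34, new StringReader("mail me at foo@example.com"));
    CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
    TypeAttribute type = ts.addAttribute(TypeAttribute.class);
    ts.reset();
    while (ts.incrementToken()) {
      // "foo@example.com" should come through as a single token whose type is "<EMAIL>",
      // which is the token type the URL/Email filters above key on.
      System.out.println(term.toString() + " -> " + type.type());
    }
    ts.end();
    ts.close();
  }
}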
diff --git a/lucene/backwards/src/test/org/apache/lucene/analysis/WordBreakTestUnicode_6_0_0.java b/lucene/backwards/src/test/org/apache/lucene/analysis/WordBreakTestUnicode_6_0_0.java
new file mode 100644
index 0000000..975b94c
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/analysis/WordBreakTestUnicode_6_0_0.java
@@ -0,0 +1,3958 @@
+package org.apache.lucene.analysis;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.junit.Ignore;
+
+/**
+ * This class was automatically generated by generateJavaUnicodeWordBreakTest.pl
+ * from: http://www.unicode.org/Public/6.0.0/ucd/auxiliary/WordBreakTest.txt
+ *
+ * WordBreakTest.txt indicates the points in the provided character sequences
+ * at which conforming implementations must and must not break words.  This
+ * class tests for expected token extraction from each of the test sequences
+ * in WordBreakTest.txt, where the expected tokens are those character
+ * sequences bounded by word breaks and containing at least one character
+ * from one of the following character sets:
+ *
+ *    \p{Script = Han}                (From http://www.unicode.org/Public/6.0.0/ucd/Scripts.txt)
+ *    \p{Script = Hiragana}
+ *    \p{LineBreak = Complex_Context} (From http://www.unicode.org/Public/6.0.0/ucd/LineBreak.txt)
+ *    \p{WordBreak = ALetter}         (From http://www.unicode.org/Public/6.0.0/ucd/auxiliary/WordBreakProperty.txt)
+ *    \p{WordBreak = Katakana}
+ *    \p{WordBreak = Numeric}         (Excludes full-width Arabic digits)
+ *    [\uFF10-\uFF19]                 (Full-width Arabic digits)
+ */
+@Ignore
+public class WordBreakTestUnicode_6_0_0 extends BaseTokenStreamTestCase {
+
+  public void test(Analyzer analyzer) throws Exception {
+    // ÷ 0001 ÷ 0001 ÷	#  ÷ [0.2] <START OF HEADING> (Other) ÷ [999.0] <START OF HEADING> (Other) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0001\u0001",
+                     new String[] {  });
+
+    // ÷ 0001 × 0308 ÷ 0001 ÷	#  ÷ [0.2] <START OF HEADING> (Other) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] <START OF HEADING> (Other) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0001\u0308\u0001",
+                     new String[] {  });
+
+    // ÷ 0001 ÷ 000D ÷	#  ÷ [0.2] <START OF HEADING> (Other) ÷ [3.11] <CARRIAGE RETURN (CR)> (CR) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0001\r",
+                     new String[] {  });
+
+    // ÷ 0001 × 0308 ÷ 000D ÷	#  ÷ [0.2] <START OF HEADING> (Other) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.11] <CARRIAGE RETURN (CR)> (CR) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0001\u0308\r",
+                     new String[] {  });
+
+    // ÷ 0001 ÷ 000A ÷	#  ÷ [0.2] <START OF HEADING> (Other) ÷ [3.11] <LINE FEED (LF)> (LF) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0001\n",
+                     new String[] {  });
+
+    // ÷ 0001 × 0308 ÷ 000A ÷	#  ÷ [0.2] <START OF HEADING> (Other) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.11] <LINE FEED (LF)> (LF) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0001\u0308\n",
+                     new String[] {  });
+
+    // ÷ 0001 ÷ 000B ÷	#  ÷ [0.2] <START OF HEADING> (Other) ÷ [3.11] <LINE TABULATION> (Newline) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0001\u000B",
+                     new String[] {  });
+
+    // ÷ 0001 × 0308 ÷ 000B ÷	#  ÷ [0.2] <START OF HEADING> (Other) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.11] <LINE TABULATION> (Newline) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0001\u0308\u000B",
+                     new String[] {  });
+
+    // ÷ 0001 ÷ 3031 ÷	#  ÷ [0.2] <START OF HEADING> (Other) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0001\u3031",
+                     new String[] { "\u3031" });
+
+    // ÷ 0001 × 0308 ÷ 3031 ÷	#  ÷ [0.2] <START OF HEADING> (Other) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0001\u0308\u3031",
+                     new String[] { "\u3031" });
+
+    // ÷ 0001 ÷ 0041 ÷	#  ÷ [0.2] <START OF HEADING> (Other) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0001\u0041",
+                     new String[] { "\u0041" });
+
+    // ÷ 0001 × 0308 ÷ 0041 ÷	#  ÷ [0.2] <START OF HEADING> (Other) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0001\u0308\u0041",
+                     new String[] { "\u0041" });
+
+    // ÷ 0001 ÷ 003A ÷	#  ÷ [0.2] <START OF HEADING> (Other) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0001\u003A",
+                     new String[] {  });
+
+    // ÷ 0001 × 0308 ÷ 003A ÷	#  ÷ [0.2] <START OF HEADING> (Other) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0001\u0308\u003A",
+                     new String[] {  });
+
+    // ÷ 0001 ÷ 002C ÷	#  ÷ [0.2] <START OF HEADING> (Other) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0001\u002C",
+                     new String[] {  });
+
+    // ÷ 0001 × 0308 ÷ 002C ÷	#  ÷ [0.2] <START OF HEADING> (Other) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0001\u0308\u002C",
+                     new String[] {  });
+
+    // ÷ 0001 ÷ 0027 ÷	#  ÷ [0.2] <START OF HEADING> (Other) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0001\u0027",
+                     new String[] {  });
+
+    // ÷ 0001 × 0308 ÷ 0027 ÷	#  ÷ [0.2] <START OF HEADING> (Other) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0001\u0308\u0027",
+                     new String[] {  });
+
+    // ÷ 0001 ÷ 0030 ÷	#  ÷ [0.2] <START OF HEADING> (Other) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0001\u0030",
+                     new String[] { "\u0030" });
+
+    // ÷ 0001 × 0308 ÷ 0030 ÷	#  ÷ [0.2] <START OF HEADING> (Other) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0001\u0308\u0030",
+                     new String[] { "\u0030" });
+
+    // ÷ 0001 ÷ 005F ÷	#  ÷ [0.2] <START OF HEADING> (Other) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0001\u005F",
+                     new String[] {  });
+
+    // ÷ 0001 × 0308 ÷ 005F ÷	#  ÷ [0.2] <START OF HEADING> (Other) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0001\u0308\u005F",
+                     new String[] {  });
+
+    // ÷ 0001 × 00AD ÷	#  ÷ [0.2] <START OF HEADING> (Other) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0001\u00AD",
+                     new String[] {  });
+
+    // ÷ 0001 × 0308 × 00AD ÷	#  ÷ [0.2] <START OF HEADING> (Other) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0001\u0308\u00AD",
+                     new String[] {  });
+
+    // ÷ 0001 × 0300 ÷	#  ÷ [0.2] <START OF HEADING> (Other) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0001\u0300",
+                     new String[] {  });
+
+    // ÷ 0001 × 0308 × 0300 ÷	#  ÷ [0.2] <START OF HEADING> (Other) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0001\u0308\u0300",
+                     new String[] {  });
+
+    // ÷ 0001 ÷ 0061 × 2060 ÷	#  ÷ [0.2] <START OF HEADING> (Other) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0001\u0061\u2060",
+                     new String[] { "\u0061\u2060" });
+
+    // ÷ 0001 × 0308 ÷ 0061 × 2060 ÷	#  ÷ [0.2] <START OF HEADING> (Other) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0001\u0308\u0061\u2060",
+                     new String[] { "\u0061\u2060" });
+
+    // ÷ 0001 ÷ 0061 ÷ 003A ÷	#  ÷ [0.2] <START OF HEADING> (Other) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0001\u0061\u003A",
+                     new String[] { "\u0061" });
+
+    // ÷ 0001 × 0308 ÷ 0061 ÷ 003A ÷	#  ÷ [0.2] <START OF HEADING> (Other) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0001\u0308\u0061\u003A",
+                     new String[] { "\u0061" });
+
+    // ÷ 0001 ÷ 0061 ÷ 0027 ÷	#  ÷ [0.2] <START OF HEADING> (Other) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0001\u0061\u0027",
+                     new String[] { "\u0061" });
+
+    // ÷ 0001 × 0308 ÷ 0061 ÷ 0027 ÷	#  ÷ [0.2] <START OF HEADING> (Other) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0001\u0308\u0061\u0027",
+                     new String[] { "\u0061" });
+
+    // ÷ 0001 ÷ 0061 ÷ 0027 × 2060 ÷	#  ÷ [0.2] <START OF HEADING> (Other) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0001\u0061\u0027\u2060",
+                     new String[] { "\u0061" });
+
+    // ÷ 0001 × 0308 ÷ 0061 ÷ 0027 × 2060 ÷	#  ÷ [0.2] <START OF HEADING> (Other) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0001\u0308\u0061\u0027\u2060",
+                     new String[] { "\u0061" });
+
+    // ÷ 0001 ÷ 0061 ÷ 002C ÷	#  ÷ [0.2] <START OF HEADING> (Other) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0001\u0061\u002C",
+                     new String[] { "\u0061" });
+
+    // ÷ 0001 × 0308 ÷ 0061 ÷ 002C ÷	#  ÷ [0.2] <START OF HEADING> (Other) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0001\u0308\u0061\u002C",
+                     new String[] { "\u0061" });
+
+    // ÷ 0001 ÷ 0031 ÷ 003A ÷	#  ÷ [0.2] <START OF HEADING> (Other) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0001\u0031\u003A",
+                     new String[] { "\u0031" });
+
+    // ÷ 0001 × 0308 ÷ 0031 ÷ 003A ÷	#  ÷ [0.2] <START OF HEADING> (Other) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0001\u0308\u0031\u003A",
+                     new String[] { "\u0031" });
+
+    // ÷ 0001 ÷ 0031 ÷ 0027 ÷	#  ÷ [0.2] <START OF HEADING> (Other) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0001\u0031\u0027",
+                     new String[] { "\u0031" });
+
+    // ÷ 0001 × 0308 ÷ 0031 ÷ 0027 ÷	#  ÷ [0.2] <START OF HEADING> (Other) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0001\u0308\u0031\u0027",
+                     new String[] { "\u0031" });
+
+    // ÷ 0001 ÷ 0031 ÷ 002C ÷	#  ÷ [0.2] <START OF HEADING> (Other) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0001\u0031\u002C",
+                     new String[] { "\u0031" });
+
+    // ÷ 0001 × 0308 ÷ 0031 ÷ 002C ÷	#  ÷ [0.2] <START OF HEADING> (Other) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0001\u0308\u0031\u002C",
+                     new String[] { "\u0031" });
+
+    // ÷ 0001 ÷ 0031 ÷ 002E × 2060 ÷	#  ÷ [0.2] <START OF HEADING> (Other) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0001\u0031\u002E\u2060",
+                     new String[] { "\u0031" });
+
+    // ÷ 0001 × 0308 ÷ 0031 ÷ 002E × 2060 ÷	#  ÷ [0.2] <START OF HEADING> (Other) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0001\u0308\u0031\u002E\u2060",
+                     new String[] { "\u0031" });
+
+    // ÷ 000D ÷ 0001 ÷	#  ÷ [0.2] <CARRIAGE RETURN (CR)> (CR) ÷ [3.1] <START OF HEADING> (Other) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\r\u0001",
+                     new String[] {  });
+
+    // ÷ 000D ÷ 0308 ÷ 0001 ÷	#  ÷ [0.2] <CARRIAGE RETURN (CR)> (CR) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] <START OF HEADING> (Other) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\r\u0308\u0001",
+                     new String[] {  });
+
+    // ÷ 000D ÷ 000D ÷	#  ÷ [0.2] <CARRIAGE RETURN (CR)> (CR) ÷ [3.1] <CARRIAGE RETURN (CR)> (CR) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\r\r",
+                     new String[] {  });
+
+    // ÷ 000D ÷ 0308 ÷ 000D ÷	#  ÷ [0.2] <CARRIAGE RETURN (CR)> (CR) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [3.11] <CARRIAGE RETURN (CR)> (CR) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\r\u0308\r",
+                     new String[] {  });
+
+    // ÷ 000D × 000A ÷	#  ÷ [0.2] <CARRIAGE RETURN (CR)> (CR) × [3.0] <LINE FEED (LF)> (LF) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\r\n",
+                     new String[] {  });
+
+    // ÷ 000D ÷ 0308 ÷ 000A ÷	#  ÷ [0.2] <CARRIAGE RETURN (CR)> (CR) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [3.11] <LINE FEED (LF)> (LF) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\r\u0308\n",
+                     new String[] {  });
+
+    // ÷ 000D ÷ 000B ÷	#  ÷ [0.2] <CARRIAGE RETURN (CR)> (CR) ÷ [3.1] <LINE TABULATION> (Newline) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\r\u000B",
+                     new String[] {  });
+
+    // ÷ 000D ÷ 0308 ÷ 000B ÷	#  ÷ [0.2] <CARRIAGE RETURN (CR)> (CR) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [3.11] <LINE TABULATION> (Newline) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\r\u0308\u000B",
+                     new String[] {  });
+
+    // ÷ 000D ÷ 3031 ÷	#  ÷ [0.2] <CARRIAGE RETURN (CR)> (CR) ÷ [3.1] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\r\u3031",
+                     new String[] { "\u3031" });
+
+    // ÷ 000D ÷ 0308 ÷ 3031 ÷	#  ÷ [0.2] <CARRIAGE RETURN (CR)> (CR) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\r\u0308\u3031",
+                     new String[] { "\u3031" });
+
+    // ÷ 000D ÷ 0041 ÷	#  ÷ [0.2] <CARRIAGE RETURN (CR)> (CR) ÷ [3.1] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\r\u0041",
+                     new String[] { "\u0041" });
+
+    // ÷ 000D ÷ 0308 ÷ 0041 ÷	#  ÷ [0.2] <CARRIAGE RETURN (CR)> (CR) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\r\u0308\u0041",
+                     new String[] { "\u0041" });
+
+    // ÷ 000D ÷ 003A ÷	#  ÷ [0.2] <CARRIAGE RETURN (CR)> (CR) ÷ [3.1] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\r\u003A",
+                     new String[] {  });
+
+    // ÷ 000D ÷ 0308 ÷ 003A ÷	#  ÷ [0.2] <CARRIAGE RETURN (CR)> (CR) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\r\u0308\u003A",
+                     new String[] {  });
+
+    // ÷ 000D ÷ 002C ÷	#  ÷ [0.2] <CARRIAGE RETURN (CR)> (CR) ÷ [3.1] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\r\u002C",
+                     new String[] {  });
+
+    // ÷ 000D ÷ 0308 ÷ 002C ÷	#  ÷ [0.2] <CARRIAGE RETURN (CR)> (CR) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\r\u0308\u002C",
+                     new String[] {  });
+
+    // ÷ 000D ÷ 0027 ÷	#  ÷ [0.2] <CARRIAGE RETURN (CR)> (CR) ÷ [3.1] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\r\u0027",
+                     new String[] {  });
+
+    // ÷ 000D ÷ 0308 ÷ 0027 ÷	#  ÷ [0.2] <CARRIAGE RETURN (CR)> (CR) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\r\u0308\u0027",
+                     new String[] {  });
+
+    // ÷ 000D ÷ 0030 ÷	#  ÷ [0.2] <CARRIAGE RETURN (CR)> (CR) ÷ [3.1] DIGIT ZERO (Numeric) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\r\u0030",
+                     new String[] { "\u0030" });
+
+    // ÷ 000D ÷ 0308 ÷ 0030 ÷	#  ÷ [0.2] <CARRIAGE RETURN (CR)> (CR) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\r\u0308\u0030",
+                     new String[] { "\u0030" });
+
+    // ÷ 000D ÷ 005F ÷	#  ÷ [0.2] <CARRIAGE RETURN (CR)> (CR) ÷ [3.1] LOW LINE (ExtendNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\r\u005F",
+                     new String[] {  });
+
+    // ÷ 000D ÷ 0308 ÷ 005F ÷	#  ÷ [0.2] <CARRIAGE RETURN (CR)> (CR) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\r\u0308\u005F",
+                     new String[] {  });
+
+    // ÷ 000D ÷ 00AD ÷	#  ÷ [0.2] <CARRIAGE RETURN (CR)> (CR) ÷ [3.1] SOFT HYPHEN (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\r\u00AD",
+                     new String[] {  });
+
+    // ÷ 000D ÷ 0308 × 00AD ÷	#  ÷ [0.2] <CARRIAGE RETURN (CR)> (CR) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\r\u0308\u00AD",
+                     new String[] {  });
+
+    // ÷ 000D ÷ 0300 ÷	#  ÷ [0.2] <CARRIAGE RETURN (CR)> (CR) ÷ [3.1] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\r\u0300",
+                     new String[] {  });
+
+    // ÷ 000D ÷ 0308 × 0300 ÷	#  ÷ [0.2] <CARRIAGE RETURN (CR)> (CR) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\r\u0308\u0300",
+                     new String[] {  });
+
+    // ÷ 000D ÷ 0061 × 2060 ÷	#  ÷ [0.2] <CARRIAGE RETURN (CR)> (CR) ÷ [3.1] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\r\u0061\u2060",
+                     new String[] { "\u0061\u2060" });
+
+    // ÷ 000D ÷ 0308 ÷ 0061 × 2060 ÷	#  ÷ [0.2] <CARRIAGE RETURN (CR)> (CR) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\r\u0308\u0061\u2060",
+                     new String[] { "\u0061\u2060" });
+
+    // ÷ 000D ÷ 0061 ÷ 003A ÷	#  ÷ [0.2] <CARRIAGE RETURN (CR)> (CR) ÷ [3.1] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\r\u0061\u003A",
+                     new String[] { "\u0061" });
+
+    // ÷ 000D ÷ 0308 ÷ 0061 ÷ 003A ÷	#  ÷ [0.2] <CARRIAGE RETURN (CR)> (CR) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\r\u0308\u0061\u003A",
+                     new String[] { "\u0061" });
+
+    // ÷ 000D ÷ 0061 ÷ 0027 ÷	#  ÷ [0.2] <CARRIAGE RETURN (CR)> (CR) ÷ [3.1] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\r\u0061\u0027",
+                     new String[] { "\u0061" });
+
+    // ÷ 000D ÷ 0308 ÷ 0061 ÷ 0027 ÷	#  ÷ [0.2] <CARRIAGE RETURN (CR)> (CR) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\r\u0308\u0061\u0027",
+                     new String[] { "\u0061" });
+
+    // ÷ 000D ÷ 0061 ÷ 0027 × 2060 ÷	#  ÷ [0.2] <CARRIAGE RETURN (CR)> (CR) ÷ [3.1] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\r\u0061\u0027\u2060",
+                     new String[] { "\u0061" });
+
+    // ÷ 000D ÷ 0308 ÷ 0061 ÷ 0027 × 2060 ÷	#  ÷ [0.2] <CARRIAGE RETURN (CR)> (CR) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\r\u0308\u0061\u0027\u2060",
+                     new String[] { "\u0061" });
+
+    // ÷ 000D ÷ 0061 ÷ 002C ÷	#  ÷ [0.2] <CARRIAGE RETURN (CR)> (CR) ÷ [3.1] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\r\u0061\u002C",
+                     new String[] { "\u0061" });
+
+    // ÷ 000D ÷ 0308 ÷ 0061 ÷ 002C ÷	#  ÷ [0.2] <CARRIAGE RETURN (CR)> (CR) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\r\u0308\u0061\u002C",
+                     new String[] { "\u0061" });
+
+    // ÷ 000D ÷ 0031 ÷ 003A ÷	#  ÷ [0.2] <CARRIAGE RETURN (CR)> (CR) ÷ [3.1] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\r\u0031\u003A",
+                     new String[] { "\u0031" });
+
+    // ÷ 000D ÷ 0308 ÷ 0031 ÷ 003A ÷	#  ÷ [0.2] <CARRIAGE RETURN (CR)> (CR) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\r\u0308\u0031\u003A",
+                     new String[] { "\u0031" });
+
+    // ÷ 000D ÷ 0031 ÷ 0027 ÷	#  ÷ [0.2] <CARRIAGE RETURN (CR)> (CR) ÷ [3.1] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\r\u0031\u0027",
+                     new String[] { "\u0031" });
+
+    // ÷ 000D ÷ 0308 ÷ 0031 ÷ 0027 ÷	#  ÷ [0.2] <CARRIAGE RETURN (CR)> (CR) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\r\u0308\u0031\u0027",
+                     new String[] { "\u0031" });
+
+    // ÷ 000D ÷ 0031 ÷ 002C ÷	#  ÷ [0.2] <CARRIAGE RETURN (CR)> (CR) ÷ [3.1] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\r\u0031\u002C",
+                     new String[] { "\u0031" });
+
+    // ÷ 000D ÷ 0308 ÷ 0031 ÷ 002C ÷	#  ÷ [0.2] <CARRIAGE RETURN (CR)> (CR) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\r\u0308\u0031\u002C",
+                     new String[] { "\u0031" });
+
+    // ÷ 000D ÷ 0031 ÷ 002E × 2060 ÷	#  ÷ [0.2] <CARRIAGE RETURN (CR)> (CR) ÷ [3.1] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\r\u0031\u002E\u2060",
+                     new String[] { "\u0031" });
+
+    // ÷ 000D ÷ 0308 ÷ 0031 ÷ 002E × 2060 ÷	#  ÷ [0.2] <CARRIAGE RETURN (CR)> (CR) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\r\u0308\u0031\u002E\u2060",
+                     new String[] { "\u0031" });
+
+    // ÷ 000A ÷ 0001 ÷	#  ÷ [0.2] <LINE FEED (LF)> (LF) ÷ [3.1] <START OF HEADING> (Other) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\n\u0001",
+                     new String[] {  });
+
+    // ÷ 000A ÷ 0308 ÷ 0001 ÷	#  ÷ [0.2] <LINE FEED (LF)> (LF) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] <START OF HEADING> (Other) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\n\u0308\u0001",
+                     new String[] {  });
+
+    // ÷ 000A ÷ 000D ÷	#  ÷ [0.2] <LINE FEED (LF)> (LF) ÷ [3.1] <CARRIAGE RETURN (CR)> (CR) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\n\r",
+                     new String[] {  });
+
+    // ÷ 000A ÷ 0308 ÷ 000D ÷	#  ÷ [0.2] <LINE FEED (LF)> (LF) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [3.11] <CARRIAGE RETURN (CR)> (CR) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\n\u0308\r",
+                     new String[] {  });
+
+    // ÷ 000A ÷ 000A ÷	#  ÷ [0.2] <LINE FEED (LF)> (LF) ÷ [3.1] <LINE FEED (LF)> (LF) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\n\n",
+                     new String[] {  });
+
+    // ÷ 000A ÷ 0308 ÷ 000A ÷	#  ÷ [0.2] <LINE FEED (LF)> (LF) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [3.11] <LINE FEED (LF)> (LF) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\n\u0308\n",
+                     new String[] {  });
+
+    // ÷ 000A ÷ 000B ÷	#  ÷ [0.2] <LINE FEED (LF)> (LF) ÷ [3.1] <LINE TABULATION> (Newline) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\n\u000B",
+                     new String[] {  });
+
+    // ÷ 000A ÷ 0308 ÷ 000B ÷	#  ÷ [0.2] <LINE FEED (LF)> (LF) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [3.11] <LINE TABULATION> (Newline) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\n\u0308\u000B",
+                     new String[] {  });
+
+    // ÷ 000A ÷ 3031 ÷	#  ÷ [0.2] <LINE FEED (LF)> (LF) ÷ [3.1] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\n\u3031",
+                     new String[] { "\u3031" });
+
+    // ÷ 000A ÷ 0308 ÷ 3031 ÷	#  ÷ [0.2] <LINE FEED (LF)> (LF) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\n\u0308\u3031",
+                     new String[] { "\u3031" });
+
+    // ÷ 000A ÷ 0041 ÷	#  ÷ [0.2] <LINE FEED (LF)> (LF) ÷ [3.1] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\n\u0041",
+                     new String[] { "\u0041" });
+
+    // ÷ 000A ÷ 0308 ÷ 0041 ÷	#  ÷ [0.2] <LINE FEED (LF)> (LF) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\n\u0308\u0041",
+                     new String[] { "\u0041" });
+
+    // ÷ 000A ÷ 003A ÷	#  ÷ [0.2] <LINE FEED (LF)> (LF) ÷ [3.1] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\n\u003A",
+                     new String[] {  });
+
+    // ÷ 000A ÷ 0308 ÷ 003A ÷	#  ÷ [0.2] <LINE FEED (LF)> (LF) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\n\u0308\u003A",
+                     new String[] {  });
+
+    // ÷ 000A ÷ 002C ÷	#  ÷ [0.2] <LINE FEED (LF)> (LF) ÷ [3.1] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\n\u002C",
+                     new String[] {  });
+
+    // ÷ 000A ÷ 0308 ÷ 002C ÷	#  ÷ [0.2] <LINE FEED (LF)> (LF) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\n\u0308\u002C",
+                     new String[] {  });
+
+    // ÷ 000A ÷ 0027 ÷	#  ÷ [0.2] <LINE FEED (LF)> (LF) ÷ [3.1] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\n\u0027",
+                     new String[] {  });
+
+    // ÷ 000A ÷ 0308 ÷ 0027 ÷	#  ÷ [0.2] <LINE FEED (LF)> (LF) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\n\u0308\u0027",
+                     new String[] {  });
+
+    // ÷ 000A ÷ 0030 ÷	#  ÷ [0.2] <LINE FEED (LF)> (LF) ÷ [3.1] DIGIT ZERO (Numeric) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\n\u0030",
+                     new String[] { "\u0030" });
+
+    // ÷ 000A ÷ 0308 ÷ 0030 ÷	#  ÷ [0.2] <LINE FEED (LF)> (LF) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\n\u0308\u0030",
+                     new String[] { "\u0030" });
+
+    // ÷ 000A ÷ 005F ÷	#  ÷ [0.2] <LINE FEED (LF)> (LF) ÷ [3.1] LOW LINE (ExtendNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\n\u005F",
+                     new String[] {  });
+
+    // ÷ 000A ÷ 0308 ÷ 005F ÷	#  ÷ [0.2] <LINE FEED (LF)> (LF) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\n\u0308\u005F",
+                     new String[] {  });
+
+    // ÷ 000A ÷ 00AD ÷	#  ÷ [0.2] <LINE FEED (LF)> (LF) ÷ [3.1] SOFT HYPHEN (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\n\u00AD",
+                     new String[] {  });
+
+    // ÷ 000A ÷ 0308 × 00AD ÷	#  ÷ [0.2] <LINE FEED (LF)> (LF) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\n\u0308\u00AD",
+                     new String[] {  });
+
+    // ÷ 000A ÷ 0300 ÷	#  ÷ [0.2] <LINE FEED (LF)> (LF) ÷ [3.1] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\n\u0300",
+                     new String[] {  });
+
+    // ÷ 000A ÷ 0308 × 0300 ÷	#  ÷ [0.2] <LINE FEED (LF)> (LF) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\n\u0308\u0300",
+                     new String[] {  });
+
+    // ÷ 000A ÷ 0061 × 2060 ÷	#  ÷ [0.2] <LINE FEED (LF)> (LF) ÷ [3.1] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\n\u0061\u2060",
+                     new String[] { "\u0061\u2060" });
+
+    // ÷ 000A ÷ 0308 ÷ 0061 × 2060 ÷	#  ÷ [0.2] <LINE FEED (LF)> (LF) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\n\u0308\u0061\u2060",
+                     new String[] { "\u0061\u2060" });
+
+    // ÷ 000A ÷ 0061 ÷ 003A ÷	#  ÷ [0.2] <LINE FEED (LF)> (LF) ÷ [3.1] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\n\u0061\u003A",
+                     new String[] { "\u0061" });
+
+    // ÷ 000A ÷ 0308 ÷ 0061 ÷ 003A ÷	#  ÷ [0.2] <LINE FEED (LF)> (LF) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\n\u0308\u0061\u003A",
+                     new String[] { "\u0061" });
+
+    // ÷ 000A ÷ 0061 ÷ 0027 ÷	#  ÷ [0.2] <LINE FEED (LF)> (LF) ÷ [3.1] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\n\u0061\u0027",
+                     new String[] { "\u0061" });
+
+    // ÷ 000A ÷ 0308 ÷ 0061 ÷ 0027 ÷	#  ÷ [0.2] <LINE FEED (LF)> (LF) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\n\u0308\u0061\u0027",
+                     new String[] { "\u0061" });
+
+    // ÷ 000A ÷ 0061 ÷ 0027 × 2060 ÷	#  ÷ [0.2] <LINE FEED (LF)> (LF) ÷ [3.1] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\n\u0061\u0027\u2060",
+                     new String[] { "\u0061" });
+
+    // ÷ 000A ÷ 0308 ÷ 0061 ÷ 0027 × 2060 ÷	#  ÷ [0.2] <LINE FEED (LF)> (LF) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\n\u0308\u0061\u0027\u2060",
+                     new String[] { "\u0061" });
+
+    // ÷ 000A ÷ 0061 ÷ 002C ÷	#  ÷ [0.2] <LINE FEED (LF)> (LF) ÷ [3.1] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\n\u0061\u002C",
+                     new String[] { "\u0061" });
+
+    // ÷ 000A ÷ 0308 ÷ 0061 ÷ 002C ÷	#  ÷ [0.2] <LINE FEED (LF)> (LF) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\n\u0308\u0061\u002C",
+                     new String[] { "\u0061" });
+
+    // ÷ 000A ÷ 0031 ÷ 003A ÷	#  ÷ [0.2] <LINE FEED (LF)> (LF) ÷ [3.1] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\n\u0031\u003A",
+                     new String[] { "\u0031" });
+
+    // ÷ 000A ÷ 0308 ÷ 0031 ÷ 003A ÷	#  ÷ [0.2] <LINE FEED (LF)> (LF) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\n\u0308\u0031\u003A",
+                     new String[] { "\u0031" });
+
+    // ÷ 000A ÷ 0031 ÷ 0027 ÷	#  ÷ [0.2] <LINE FEED (LF)> (LF) ÷ [3.1] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\n\u0031\u0027",
+                     new String[] { "\u0031" });
+
+    // ÷ 000A ÷ 0308 ÷ 0031 ÷ 0027 ÷	#  ÷ [0.2] <LINE FEED (LF)> (LF) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\n\u0308\u0031\u0027",
+                     new String[] { "\u0031" });
+
+    // ÷ 000A ÷ 0031 ÷ 002C ÷	#  ÷ [0.2] <LINE FEED (LF)> (LF) ÷ [3.1] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\n\u0031\u002C",
+                     new String[] { "\u0031" });
+
+    // ÷ 000A ÷ 0308 ÷ 0031 ÷ 002C ÷	#  ÷ [0.2] <LINE FEED (LF)> (LF) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\n\u0308\u0031\u002C",
+                     new String[] { "\u0031" });
+
+    // ÷ 000A ÷ 0031 ÷ 002E × 2060 ÷	#  ÷ [0.2] <LINE FEED (LF)> (LF) ÷ [3.1] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\n\u0031\u002E\u2060",
+                     new String[] { "\u0031" });
+
+    // ÷ 000A ÷ 0308 ÷ 0031 ÷ 002E × 2060 ÷	#  ÷ [0.2] <LINE FEED (LF)> (LF) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\n\u0308\u0031\u002E\u2060",
+                     new String[] { "\u0031" });
+
+    // ÷ 000B ÷ 0001 ÷	#  ÷ [0.2] <LINE TABULATION> (Newline) ÷ [3.1] <START OF HEADING> (Other) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u000B\u0001",
+                     new String[] {  });
+
+    // ÷ 000B ÷ 0308 ÷ 0001 ÷	#  ÷ [0.2] <LINE TABULATION> (Newline) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] <START OF HEADING> (Other) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u000B\u0308\u0001",
+                     new String[] {  });
+
+    // ÷ 000B ÷ 000D ÷	#  ÷ [0.2] <LINE TABULATION> (Newline) ÷ [3.1] <CARRIAGE RETURN (CR)> (CR) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u000B\r",
+                     new String[] {  });
+
+    // ÷ 000B ÷ 0308 ÷ 000D ÷	#  ÷ [0.2] <LINE TABULATION> (Newline) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [3.11] <CARRIAGE RETURN (CR)> (CR) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u000B\u0308\r",
+                     new String[] {  });
+
+    // ÷ 000B ÷ 000A ÷	#  ÷ [0.2] <LINE TABULATION> (Newline) ÷ [3.1] <LINE FEED (LF)> (LF) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u000B\n",
+                     new String[] {  });
+
+    // ÷ 000B ÷ 0308 ÷ 000A ÷	#  ÷ [0.2] <LINE TABULATION> (Newline) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [3.11] <LINE FEED (LF)> (LF) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u000B\u0308\n",
+                     new String[] {  });
+
+    // ÷ 000B ÷ 000B ÷	#  ÷ [0.2] <LINE TABULATION> (Newline) ÷ [3.1] <LINE TABULATION> (Newline) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u000B\u000B",
+                     new String[] {  });
+
+    // ÷ 000B ÷ 0308 ÷ 000B ÷	#  ÷ [0.2] <LINE TABULATION> (Newline) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [3.11] <LINE TABULATION> (Newline) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u000B\u0308\u000B",
+                     new String[] {  });
+
+    // ÷ 000B ÷ 3031 ÷	#  ÷ [0.2] <LINE TABULATION> (Newline) ÷ [3.1] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u000B\u3031",
+                     new String[] { "\u3031" });
+
+    // ÷ 000B ÷ 0308 ÷ 3031 ÷	#  ÷ [0.2] <LINE TABULATION> (Newline) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u000B\u0308\u3031",
+                     new String[] { "\u3031" });
+
+    // ÷ 000B ÷ 0041 ÷	#  ÷ [0.2] <LINE TABULATION> (Newline) ÷ [3.1] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u000B\u0041",
+                     new String[] { "\u0041" });
+
+    // ÷ 000B ÷ 0308 ÷ 0041 ÷	#  ÷ [0.2] <LINE TABULATION> (Newline) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u000B\u0308\u0041",
+                     new String[] { "\u0041" });
+
+    // ÷ 000B ÷ 003A ÷	#  ÷ [0.2] <LINE TABULATION> (Newline) ÷ [3.1] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u000B\u003A",
+                     new String[] {  });
+
+    // ÷ 000B ÷ 0308 ÷ 003A ÷	#  ÷ [0.2] <LINE TABULATION> (Newline) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u000B\u0308\u003A",
+                     new String[] {  });
+
+    // ÷ 000B ÷ 002C ÷	#  ÷ [0.2] <LINE TABULATION> (Newline) ÷ [3.1] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u000B\u002C",
+                     new String[] {  });
+
+    // ÷ 000B ÷ 0308 ÷ 002C ÷	#  ÷ [0.2] <LINE TABULATION> (Newline) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u000B\u0308\u002C",
+                     new String[] {  });
+
+    // ÷ 000B ÷ 0027 ÷	#  ÷ [0.2] <LINE TABULATION> (Newline) ÷ [3.1] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u000B\u0027",
+                     new String[] {  });
+
+    // ÷ 000B ÷ 0308 ÷ 0027 ÷	#  ÷ [0.2] <LINE TABULATION> (Newline) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u000B\u0308\u0027",
+                     new String[] {  });
+
+    // ÷ 000B ÷ 0030 ÷	#  ÷ [0.2] <LINE TABULATION> (Newline) ÷ [3.1] DIGIT ZERO (Numeric) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u000B\u0030",
+                     new String[] { "\u0030" });
+
+    // ÷ 000B ÷ 0308 ÷ 0030 ÷	#  ÷ [0.2] <LINE TABULATION> (Newline) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u000B\u0308\u0030",
+                     new String[] { "\u0030" });
+
+    // ÷ 000B ÷ 005F ÷	#  ÷ [0.2] <LINE TABULATION> (Newline) ÷ [3.1] LOW LINE (ExtendNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u000B\u005F",
+                     new String[] {  });
+
+    // ÷ 000B ÷ 0308 ÷ 005F ÷	#  ÷ [0.2] <LINE TABULATION> (Newline) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u000B\u0308\u005F",
+                     new String[] {  });
+
+    // ÷ 000B ÷ 00AD ÷	#  ÷ [0.2] <LINE TABULATION> (Newline) ÷ [3.1] SOFT HYPHEN (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u000B\u00AD",
+                     new String[] {  });
+
+    // ÷ 000B ÷ 0308 × 00AD ÷	#  ÷ [0.2] <LINE TABULATION> (Newline) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u000B\u0308\u00AD",
+                     new String[] {  });
+
+    // ÷ 000B ÷ 0300 ÷	#  ÷ [0.2] <LINE TABULATION> (Newline) ÷ [3.1] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u000B\u0300",
+                     new String[] {  });
+
+    // ÷ 000B ÷ 0308 × 0300 ÷	#  ÷ [0.2] <LINE TABULATION> (Newline) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u000B\u0308\u0300",
+                     new String[] {  });
+
+    // ÷ 000B ÷ 0061 × 2060 ÷	#  ÷ [0.2] <LINE TABULATION> (Newline) ÷ [3.1] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u000B\u0061\u2060",
+                     new String[] { "\u0061\u2060" });
+
+    // ÷ 000B ÷ 0308 ÷ 0061 × 2060 ÷	#  ÷ [0.2] <LINE TABULATION> (Newline) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u000B\u0308\u0061\u2060",
+                     new String[] { "\u0061\u2060" });
+
+    // ÷ 000B ÷ 0061 ÷ 003A ÷	#  ÷ [0.2] <LINE TABULATION> (Newline) ÷ [3.1] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u000B\u0061\u003A",
+                     new String[] { "\u0061" });
+
+    // ÷ 000B ÷ 0308 ÷ 0061 ÷ 003A ÷	#  ÷ [0.2] <LINE TABULATION> (Newline) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u000B\u0308\u0061\u003A",
+                     new String[] { "\u0061" });
+
+    // ÷ 000B ÷ 0061 ÷ 0027 ÷	#  ÷ [0.2] <LINE TABULATION> (Newline) ÷ [3.1] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u000B\u0061\u0027",
+                     new String[] { "\u0061" });
+
+    // ÷ 000B ÷ 0308 ÷ 0061 ÷ 0027 ÷	#  ÷ [0.2] <LINE TABULATION> (Newline) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u000B\u0308\u0061\u0027",
+                     new String[] { "\u0061" });
+
+    // ÷ 000B ÷ 0061 ÷ 0027 × 2060 ÷	#  ÷ [0.2] <LINE TABULATION> (Newline) ÷ [3.1] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u000B\u0061\u0027\u2060",
+                     new String[] { "\u0061" });
+
+    // ÷ 000B ÷ 0308 ÷ 0061 ÷ 0027 × 2060 ÷	#  ÷ [0.2] <LINE TABULATION> (Newline) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u000B\u0308\u0061\u0027\u2060",
+                     new String[] { "\u0061" });
+
+    // ÷ 000B ÷ 0061 ÷ 002C ÷	#  ÷ [0.2] <LINE TABULATION> (Newline) ÷ [3.1] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u000B\u0061\u002C",
+                     new String[] { "\u0061" });
+
+    // ÷ 000B ÷ 0308 ÷ 0061 ÷ 002C ÷	#  ÷ [0.2] <LINE TABULATION> (Newline) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u000B\u0308\u0061\u002C",
+                     new String[] { "\u0061" });
+
+    // ÷ 000B ÷ 0031 ÷ 003A ÷	#  ÷ [0.2] <LINE TABULATION> (Newline) ÷ [3.1] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u000B\u0031\u003A",
+                     new String[] { "\u0031" });
+
+    // ÷ 000B ÷ 0308 ÷ 0031 ÷ 003A ÷	#  ÷ [0.2] <LINE TABULATION> (Newline) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u000B\u0308\u0031\u003A",
+                     new String[] { "\u0031" });
+
+    // ÷ 000B ÷ 0031 ÷ 0027 ÷	#  ÷ [0.2] <LINE TABULATION> (Newline) ÷ [3.1] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u000B\u0031\u0027",
+                     new String[] { "\u0031" });
+
+    // ÷ 000B ÷ 0308 ÷ 0031 ÷ 0027 ÷	#  ÷ [0.2] <LINE TABULATION> (Newline) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u000B\u0308\u0031\u0027",
+                     new String[] { "\u0031" });
+
+    // ÷ 000B ÷ 0031 ÷ 002C ÷	#  ÷ [0.2] <LINE TABULATION> (Newline) ÷ [3.1] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u000B\u0031\u002C",
+                     new String[] { "\u0031" });
+
+    // ÷ 000B ÷ 0308 ÷ 0031 ÷ 002C ÷	#  ÷ [0.2] <LINE TABULATION> (Newline) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u000B\u0308\u0031\u002C",
+                     new String[] { "\u0031" });
+
+    // ÷ 000B ÷ 0031 ÷ 002E × 2060 ÷	#  ÷ [0.2] <LINE TABULATION> (Newline) ÷ [3.1] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u000B\u0031\u002E\u2060",
+                     new String[] { "\u0031" });
+
+    // ÷ 000B ÷ 0308 ÷ 0031 ÷ 002E × 2060 ÷	#  ÷ [0.2] <LINE TABULATION> (Newline) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u000B\u0308\u0031\u002E\u2060",
+                     new String[] { "\u0031" });
+
+    // ÷ 3031 ÷ 0001 ÷	#  ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) ÷ [999.0] <START OF HEADING> (Other) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u3031\u0001",
+                     new String[] { "\u3031" });
+
+    // ÷ 3031 × 0308 ÷ 0001 ÷	#  ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] <START OF HEADING> (Other) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u3031\u0308\u0001",
+                     new String[] { "\u3031\u0308" });
+
+    // ÷ 3031 ÷ 000D ÷	#  ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) ÷ [3.11] <CARRIAGE RETURN (CR)> (CR) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u3031\r",
+                     new String[] { "\u3031" });
+
+    // ÷ 3031 × 0308 ÷ 000D ÷	#  ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.11] <CARRIAGE RETURN (CR)> (CR) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u3031\u0308\r",
+                     new String[] { "\u3031\u0308" });
+
+    // ÷ 3031 ÷ 000A ÷	#  ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) ÷ [3.11] <LINE FEED (LF)> (LF) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u3031\n",
+                     new String[] { "\u3031" });
+
+    // ÷ 3031 × 0308 ÷ 000A ÷	#  ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.11] <LINE FEED (LF)> (LF) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u3031\u0308\n",
+                     new String[] { "\u3031\u0308" });
+
+    // ÷ 3031 ÷ 000B ÷	#  ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) ÷ [3.11] <LINE TABULATION> (Newline) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u3031\u000B",
+                     new String[] { "\u3031" });
+
+    // ÷ 3031 × 0308 ÷ 000B ÷	#  ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.11] <LINE TABULATION> (Newline) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u3031\u0308\u000B",
+                     new String[] { "\u3031\u0308" });
+
+    // ÷ 3031 × 3031 ÷	#  ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [13.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u3031\u3031",
+                     new String[] { "\u3031\u3031" });
+
+    // ÷ 3031 × 0308 × 3031 ÷	#  ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [4.0] COMBINING DIAERESIS (Extend_FE) × [13.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u3031\u0308\u3031",
+                     new String[] { "\u3031\u0308\u3031" });
+
+    // ÷ 3031 ÷ 0041 ÷	#  ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u3031\u0041",
+                     new String[] { "\u3031", "\u0041" });
+
+    // ÷ 3031 × 0308 ÷ 0041 ÷	#  ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u3031\u0308\u0041",
+                     new String[] { "\u3031\u0308", "\u0041" });
+
+    // ÷ 3031 ÷ 003A ÷	#  ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u3031\u003A",
+                     new String[] { "\u3031" });
+
+    // ÷ 3031 × 0308 ÷ 003A ÷	#  ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u3031\u0308\u003A",
+                     new String[] { "\u3031\u0308" });
+
+    // ÷ 3031 ÷ 002C ÷	#  ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u3031\u002C",
+                     new String[] { "\u3031" });
+
+    // ÷ 3031 × 0308 ÷ 002C ÷	#  ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u3031\u0308\u002C",
+                     new String[] { "\u3031\u0308" });
+
+    // ÷ 3031 ÷ 0027 ÷	#  ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u3031\u0027",
+                     new String[] { "\u3031" });
+
+    // ÷ 3031 × 0308 ÷ 0027 ÷	#  ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u3031\u0308\u0027",
+                     new String[] { "\u3031\u0308" });
+
+    // ÷ 3031 ÷ 0030 ÷	#  ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u3031\u0030",
+                     new String[] { "\u3031", "\u0030" });
+
+    // ÷ 3031 × 0308 ÷ 0030 ÷	#  ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u3031\u0308\u0030",
+                     new String[] { "\u3031\u0308", "\u0030" });
+
+    // ÷ 3031 × 005F ÷	#  ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [13.1] LOW LINE (ExtendNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u3031\u005F",
+                     new String[] { "\u3031\u005F" });
+
+    // ÷ 3031 × 0308 × 005F ÷	#  ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [4.0] COMBINING DIAERESIS (Extend_FE) × [13.1] LOW LINE (ExtendNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u3031\u0308\u005F",
+                     new String[] { "\u3031\u0308\u005F" });
+
+    // ÷ 3031 × 00AD ÷	#  ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u3031\u00AD",
+                     new String[] { "\u3031\u00AD" });
+
+    // ÷ 3031 × 0308 × 00AD ÷	#  ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u3031\u0308\u00AD",
+                     new String[] { "\u3031\u0308\u00AD" });
+
+    // ÷ 3031 × 0300 ÷	#  ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u3031\u0300",
+                     new String[] { "\u3031\u0300" });
+
+    // ÷ 3031 × 0308 × 0300 ÷	#  ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u3031\u0308\u0300",
+                     new String[] { "\u3031\u0308\u0300" });
+
+    // ÷ 3031 ÷ 0061 × 2060 ÷	#  ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u3031\u0061\u2060",
+                     new String[] { "\u3031", "\u0061\u2060" });
+
+    // ÷ 3031 × 0308 ÷ 0061 × 2060 ÷	#  ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u3031\u0308\u0061\u2060",
+                     new String[] { "\u3031\u0308", "\u0061\u2060" });
+
+    // ÷ 3031 ÷ 0061 ÷ 003A ÷	#  ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u3031\u0061\u003A",
+                     new String[] { "\u3031", "\u0061" });
+
+    // ÷ 3031 × 0308 ÷ 0061 ÷ 003A ÷	#  ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u3031\u0308\u0061\u003A",
+                     new String[] { "\u3031\u0308", "\u0061" });
+
+    // ÷ 3031 ÷ 0061 ÷ 0027 ÷	#  ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u3031\u0061\u0027",
+                     new String[] { "\u3031", "\u0061" });
+
+    // ÷ 3031 × 0308 ÷ 0061 ÷ 0027 ÷	#  ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u3031\u0308\u0061\u0027",
+                     new String[] { "\u3031\u0308", "\u0061" });
+
+    // ÷ 3031 ÷ 0061 ÷ 0027 × 2060 ÷	#  ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u3031\u0061\u0027\u2060",
+                     new String[] { "\u3031", "\u0061" });
+
+    // ÷ 3031 × 0308 ÷ 0061 ÷ 0027 × 2060 ÷	#  ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u3031\u0308\u0061\u0027\u2060",
+                     new String[] { "\u3031\u0308", "\u0061" });
+
+    // ÷ 3031 ÷ 0061 ÷ 002C ÷	#  ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u3031\u0061\u002C",
+                     new String[] { "\u3031", "\u0061" });
+
+    // ÷ 3031 × 0308 ÷ 0061 ÷ 002C ÷	#  ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u3031\u0308\u0061\u002C",
+                     new String[] { "\u3031\u0308", "\u0061" });
+
+    // ÷ 3031 ÷ 0031 ÷ 003A ÷	#  ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u3031\u0031\u003A",
+                     new String[] { "\u3031", "\u0031" });
+
+    // ÷ 3031 × 0308 ÷ 0031 ÷ 003A ÷	#  ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u3031\u0308\u0031\u003A",
+                     new String[] { "\u3031\u0308", "\u0031" });
+
+    // ÷ 3031 ÷ 0031 ÷ 0027 ÷	#  ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u3031\u0031\u0027",
+                     new String[] { "\u3031", "\u0031" });
+
+    // ÷ 3031 × 0308 ÷ 0031 ÷ 0027 ÷	#  ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u3031\u0308\u0031\u0027",
+                     new String[] { "\u3031\u0308", "\u0031" });
+
+    // ÷ 3031 ÷ 0031 ÷ 002C ÷	#  ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u3031\u0031\u002C",
+                     new String[] { "\u3031", "\u0031" });
+
+    // ÷ 3031 × 0308 ÷ 0031 ÷ 002C ÷	#  ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u3031\u0308\u0031\u002C",
+                     new String[] { "\u3031\u0308", "\u0031" });
+
+    // ÷ 3031 ÷ 0031 ÷ 002E × 2060 ÷	#  ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u3031\u0031\u002E\u2060",
+                     new String[] { "\u3031", "\u0031" });
+
+    // ÷ 3031 × 0308 ÷ 0031 ÷ 002E × 2060 ÷	#  ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u3031\u0308\u0031\u002E\u2060",
+                     new String[] { "\u3031\u0308", "\u0031" });
+
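+    // NOTE: the cases in this method appear to be generated from the Unicode
+    // UAX #29 WordBreakTest data: in each comment, "÷" marks a position where
+    // a word break is expected and "×" a position where none is, with the
+    // bracketed numbers identifying the word-boundary rule being exercised.
+    // Each assertAnalyzesTo call then checks that the analyzer emits exactly
+    // the expected word tokens (letters, digits, katakana) for that sample;
+    // lone combining marks and mid-word punctuation yield no token of their own.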
+    // ÷ 0041 ÷ 0001 ÷	#  ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) ÷ [999.0] <START OF HEADING> (Other) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0041\u0001",
+                     new String[] { "\u0041" });
+
+    // ÷ 0041 × 0308 ÷ 0001 ÷	#  ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] <START OF HEADING> (Other) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0041\u0308\u0001",
+                     new String[] { "\u0041\u0308" });
+
+    // ÷ 0041 ÷ 000D ÷	#  ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) ÷ [3.11] <CARRIAGE RETURN (CR)> (CR) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0041\r",
+                     new String[] { "\u0041" });
+
+    // ÷ 0041 × 0308 ÷ 000D ÷	#  ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.11] <CARRIAGE RETURN (CR)> (CR) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0041\u0308\r",
+                     new String[] { "\u0041\u0308" });
+
+    // ÷ 0041 ÷ 000A ÷	#  ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) ÷ [3.11] <LINE FEED (LF)> (LF) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0041\n",
+                     new String[] { "\u0041" });
+
+    // ÷ 0041 × 0308 ÷ 000A ÷	#  ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.11] <LINE FEED (LF)> (LF) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0041\u0308\n",
+                     new String[] { "\u0041\u0308" });
+
+    // ÷ 0041 ÷ 000B ÷	#  ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) ÷ [3.11] <LINE TABULATION> (Newline) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0041\u000B",
+                     new String[] { "\u0041" });
+
+    // ÷ 0041 × 0308 ÷ 000B ÷	#  ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.11] <LINE TABULATION> (Newline) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0041\u0308\u000B",
+                     new String[] { "\u0041\u0308" });
+
+    // ÷ 0041 ÷ 3031 ÷	#  ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0041\u3031",
+                     new String[] { "\u0041", "\u3031" });
+
+    // ÷ 0041 × 0308 ÷ 3031 ÷	#  ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0041\u0308\u3031",
+                     new String[] { "\u0041\u0308", "\u3031" });
+
+    // ÷ 0041 × 0041 ÷	#  ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [5.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0041\u0041",
+                     new String[] { "\u0041\u0041" });
+
+    // ÷ 0041 × 0308 × 0041 ÷	#  ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [5.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0041\u0308\u0041",
+                     new String[] { "\u0041\u0308\u0041" });
+
+    // ÷ 0041 ÷ 003A ÷	#  ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0041\u003A",
+                     new String[] { "\u0041" });
+
+    // ÷ 0041 × 0308 ÷ 003A ÷	#  ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0041\u0308\u003A",
+                     new String[] { "\u0041\u0308" });
+
+    // ÷ 0041 ÷ 002C ÷	#  ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0041\u002C",
+                     new String[] { "\u0041" });
+
+    // ÷ 0041 × 0308 ÷ 002C ÷	#  ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0041\u0308\u002C",
+                     new String[] { "\u0041\u0308" });
+
+    // ÷ 0041 ÷ 0027 ÷	#  ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0041\u0027",
+                     new String[] { "\u0041" });
+
+    // ÷ 0041 × 0308 ÷ 0027 ÷	#  ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0041\u0308\u0027",
+                     new String[] { "\u0041\u0308" });
+
+    // ÷ 0041 × 0030 ÷	#  ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [9.0] DIGIT ZERO (Numeric) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0041\u0030",
+                     new String[] { "\u0041\u0030" });
+
+    // ÷ 0041 × 0308 × 0030 ÷	#  ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [9.0] DIGIT ZERO (Numeric) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0041\u0308\u0030",
+                     new String[] { "\u0041\u0308\u0030" });
+
+    // ÷ 0041 × 005F ÷	#  ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [13.1] LOW LINE (ExtendNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0041\u005F",
+                     new String[] { "\u0041\u005F" });
+
+    // ÷ 0041 × 0308 × 005F ÷	#  ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [13.1] LOW LINE (ExtendNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0041\u0308\u005F",
+                     new String[] { "\u0041\u0308\u005F" });
+
+    // ÷ 0041 × 00AD ÷	#  ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0041\u00AD",
+                     new String[] { "\u0041\u00AD" });
+
+    // ÷ 0041 × 0308 × 00AD ÷	#  ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0041\u0308\u00AD",
+                     new String[] { "\u0041\u0308\u00AD" });
+
+    // ÷ 0041 × 0300 ÷	#  ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0041\u0300",
+                     new String[] { "\u0041\u0300" });
+
+    // ÷ 0041 × 0308 × 0300 ÷	#  ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0041\u0308\u0300",
+                     new String[] { "\u0041\u0308\u0300" });
+
+    // ÷ 0041 × 0061 × 2060 ÷	#  ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [5.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0041\u0061\u2060",
+                     new String[] { "\u0041\u0061\u2060" });
+
+    // ÷ 0041 × 0308 × 0061 × 2060 ÷	#  ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [5.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0041\u0308\u0061\u2060",
+                     new String[] { "\u0041\u0308\u0061\u2060" });
+
+    // ÷ 0041 × 0061 ÷ 003A ÷	#  ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [5.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0041\u0061\u003A",
+                     new String[] { "\u0041\u0061" });
+
+    // ÷ 0041 × 0308 × 0061 ÷ 003A ÷	#  ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [5.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0041\u0308\u0061\u003A",
+                     new String[] { "\u0041\u0308\u0061" });
+
+    // ÷ 0041 × 0061 ÷ 0027 ÷	#  ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [5.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0041\u0061\u0027",
+                     new String[] { "\u0041\u0061" });
+
+    // ÷ 0041 × 0308 × 0061 ÷ 0027 ÷	#  ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [5.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0041\u0308\u0061\u0027",
+                     new String[] { "\u0041\u0308\u0061" });
+
+    // ÷ 0041 × 0061 ÷ 0027 × 2060 ÷	#  ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [5.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0041\u0061\u0027\u2060",
+                     new String[] { "\u0041\u0061" });
+
+    // ÷ 0041 × 0308 × 0061 ÷ 0027 × 2060 ÷	#  ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [5.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0041\u0308\u0061\u0027\u2060",
+                     new String[] { "\u0041\u0308\u0061" });
+
+    // ÷ 0041 × 0061 ÷ 002C ÷	#  ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [5.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0041\u0061\u002C",
+                     new String[] { "\u0041\u0061" });
+
+    // ÷ 0041 × 0308 × 0061 ÷ 002C ÷	#  ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [5.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0041\u0308\u0061\u002C",
+                     new String[] { "\u0041\u0308\u0061" });
+
+    // ÷ 0041 × 0031 ÷ 003A ÷	#  ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [9.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0041\u0031\u003A",
+                     new String[] { "\u0041\u0031" });
+
+    // ÷ 0041 × 0308 × 0031 ÷ 003A ÷	#  ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [9.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0041\u0308\u0031\u003A",
+                     new String[] { "\u0041\u0308\u0031" });
+
+    // ÷ 0041 × 0031 ÷ 0027 ÷	#  ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [9.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0041\u0031\u0027",
+                     new String[] { "\u0041\u0031" });
+
+    // ÷ 0041 × 0308 × 0031 ÷ 0027 ÷	#  ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [9.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0041\u0308\u0031\u0027",
+                     new String[] { "\u0041\u0308\u0031" });
+
+    // ÷ 0041 × 0031 ÷ 002C ÷	#  ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [9.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0041\u0031\u002C",
+                     new String[] { "\u0041\u0031" });
+
+    // ÷ 0041 × 0308 × 0031 ÷ 002C ÷	#  ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [9.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0041\u0308\u0031\u002C",
+                     new String[] { "\u0041\u0308\u0031" });
+
+    // ÷ 0041 × 0031 ÷ 002E × 2060 ÷	#  ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [9.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0041\u0031\u002E\u2060",
+                     new String[] { "\u0041\u0031" });
+
+    // ÷ 0041 × 0308 × 0031 ÷ 002E × 2060 ÷	#  ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [9.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0041\u0308\u0031\u002E\u2060",
+                     new String[] { "\u0041\u0308\u0031" });
+
+    // ÷ 003A ÷ 0001 ÷	#  ÷ [0.2] COLON (MidLetter) ÷ [999.0] <START OF HEADING> (Other) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u003A\u0001",
+                     new String[] {  });
+
+    // ÷ 003A × 0308 ÷ 0001 ÷	#  ÷ [0.2] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] <START OF HEADING> (Other) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u003A\u0308\u0001",
+                     new String[] {  });
+
+    // ÷ 003A ÷ 000D ÷	#  ÷ [0.2] COLON (MidLetter) ÷ [3.11] <CARRIAGE RETURN (CR)> (CR) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u003A\r",
+                     new String[] {  });
+
+    // ÷ 003A × 0308 ÷ 000D ÷	#  ÷ [0.2] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.11] <CARRIAGE RETURN (CR)> (CR) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u003A\u0308\r",
+                     new String[] {  });
+
+    // ÷ 003A ÷ 000A ÷	#  ÷ [0.2] COLON (MidLetter) ÷ [3.11] <LINE FEED (LF)> (LF) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u003A\n",
+                     new String[] {  });
+
+    // ÷ 003A × 0308 ÷ 000A ÷	#  ÷ [0.2] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.11] <LINE FEED (LF)> (LF) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u003A\u0308\n",
+                     new String[] {  });
+
+    // ÷ 003A ÷ 000B ÷	#  ÷ [0.2] COLON (MidLetter) ÷ [3.11] <LINE TABULATION> (Newline) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u003A\u000B",
+                     new String[] {  });
+
+    // ÷ 003A × 0308 ÷ 000B ÷	#  ÷ [0.2] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.11] <LINE TABULATION> (Newline) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u003A\u0308\u000B",
+                     new String[] {  });
+
+    // ÷ 003A ÷ 3031 ÷	#  ÷ [0.2] COLON (MidLetter) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u003A\u3031",
+                     new String[] { "\u3031" });
+
+    // ÷ 003A × 0308 ÷ 3031 ÷	#  ÷ [0.2] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u003A\u0308\u3031",
+                     new String[] { "\u3031" });
+
+    // ÷ 003A ÷ 0041 ÷	#  ÷ [0.2] COLON (MidLetter) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u003A\u0041",
+                     new String[] { "\u0041" });
+
+    // ÷ 003A × 0308 ÷ 0041 ÷	#  ÷ [0.2] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u003A\u0308\u0041",
+                     new String[] { "\u0041" });
+
+    // ÷ 003A ÷ 003A ÷	#  ÷ [0.2] COLON (MidLetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u003A\u003A",
+                     new String[] {  });
+
+    // ÷ 003A × 0308 ÷ 003A ÷	#  ÷ [0.2] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u003A\u0308\u003A",
+                     new String[] {  });
+
+    // ÷ 003A ÷ 002C ÷	#  ÷ [0.2] COLON (MidLetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u003A\u002C",
+                     new String[] {  });
+
+    // ÷ 003A × 0308 ÷ 002C ÷	#  ÷ [0.2] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u003A\u0308\u002C",
+                     new String[] {  });
+
+    // ÷ 003A ÷ 0027 ÷	#  ÷ [0.2] COLON (MidLetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u003A\u0027",
+                     new String[] {  });
+
+    // ÷ 003A × 0308 ÷ 0027 ÷	#  ÷ [0.2] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u003A\u0308\u0027",
+                     new String[] {  });
+
+    // ÷ 003A ÷ 0030 ÷	#  ÷ [0.2] COLON (MidLetter) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u003A\u0030",
+                     new String[] { "\u0030" });
+
+    // ÷ 003A × 0308 ÷ 0030 ÷	#  ÷ [0.2] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u003A\u0308\u0030",
+                     new String[] { "\u0030" });
+
+    // ÷ 003A ÷ 005F ÷	#  ÷ [0.2] COLON (MidLetter) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u003A\u005F",
+                     new String[] {  });
+
+    // ÷ 003A × 0308 ÷ 005F ÷	#  ÷ [0.2] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u003A\u0308\u005F",
+                     new String[] {  });
+
+    // ÷ 003A × 00AD ÷	#  ÷ [0.2] COLON (MidLetter) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u003A\u00AD",
+                     new String[] {  });
+
+    // ÷ 003A × 0308 × 00AD ÷	#  ÷ [0.2] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u003A\u0308\u00AD",
+                     new String[] {  });
+
+    // ÷ 003A × 0300 ÷	#  ÷ [0.2] COLON (MidLetter) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u003A\u0300",
+                     new String[] {  });
+
+    // ÷ 003A × 0308 × 0300 ÷	#  ÷ [0.2] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u003A\u0308\u0300",
+                     new String[] {  });
+
+    // ÷ 003A ÷ 0061 × 2060 ÷	#  ÷ [0.2] COLON (MidLetter) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u003A\u0061\u2060",
+                     new String[] { "\u0061\u2060" });
+
+    // ÷ 003A × 0308 ÷ 0061 × 2060 ÷	#  ÷ [0.2] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u003A\u0308\u0061\u2060",
+                     new String[] { "\u0061\u2060" });
+
+    // ÷ 003A ÷ 0061 ÷ 003A ÷	#  ÷ [0.2] COLON (MidLetter) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u003A\u0061\u003A",
+                     new String[] { "\u0061" });
+
+    // ÷ 003A × 0308 ÷ 0061 ÷ 003A ÷	#  ÷ [0.2] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u003A\u0308\u0061\u003A",
+                     new String[] { "\u0061" });
+
+    // ÷ 003A ÷ 0061 ÷ 0027 ÷	#  ÷ [0.2] COLON (MidLetter) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u003A\u0061\u0027",
+                     new String[] { "\u0061" });
+
+    // ÷ 003A × 0308 ÷ 0061 ÷ 0027 ÷	#  ÷ [0.2] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u003A\u0308\u0061\u0027",
+                     new String[] { "\u0061" });
+
+    // ÷ 003A ÷ 0061 ÷ 0027 × 2060 ÷	#  ÷ [0.2] COLON (MidLetter) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u003A\u0061\u0027\u2060",
+                     new String[] { "\u0061" });
+
+    // ÷ 003A × 0308 ÷ 0061 ÷ 0027 × 2060 ÷	#  ÷ [0.2] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u003A\u0308\u0061\u0027\u2060",
+                     new String[] { "\u0061" });
+
+    // ÷ 003A ÷ 0061 ÷ 002C ÷	#  ÷ [0.2] COLON (MidLetter) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u003A\u0061\u002C",
+                     new String[] { "\u0061" });
+
+    // ÷ 003A × 0308 ÷ 0061 ÷ 002C ÷	#  ÷ [0.2] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u003A\u0308\u0061\u002C",
+                     new String[] { "\u0061" });
+
+    // ÷ 003A ÷ 0031 ÷ 003A ÷	#  ÷ [0.2] COLON (MidLetter) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u003A\u0031\u003A",
+                     new String[] { "\u0031" });
+
+    // ÷ 003A × 0308 ÷ 0031 ÷ 003A ÷	#  ÷ [0.2] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u003A\u0308\u0031\u003A",
+                     new String[] { "\u0031" });
+
+    // ÷ 003A ÷ 0031 ÷ 0027 ÷	#  ÷ [0.2] COLON (MidLetter) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u003A\u0031\u0027",
+                     new String[] { "\u0031" });
+
+    // ÷ 003A × 0308 ÷ 0031 ÷ 0027 ÷	#  ÷ [0.2] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u003A\u0308\u0031\u0027",
+                     new String[] { "\u0031" });
+
+    // ÷ 003A ÷ 0031 ÷ 002C ÷	#  ÷ [0.2] COLON (MidLetter) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u003A\u0031\u002C",
+                     new String[] { "\u0031" });
+
+    // ÷ 003A × 0308 ÷ 0031 ÷ 002C ÷	#  ÷ [0.2] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u003A\u0308\u0031\u002C",
+                     new String[] { "\u0031" });
+
+    // ÷ 003A ÷ 0031 ÷ 002E × 2060 ÷	#  ÷ [0.2] COLON (MidLetter) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u003A\u0031\u002E\u2060",
+                     new String[] { "\u0031" });
+
+    // ÷ 003A × 0308 ÷ 0031 ÷ 002E × 2060 ÷	#  ÷ [0.2] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u003A\u0308\u0031\u002E\u2060",
+                     new String[] { "\u0031" });
+
+    // ÷ 002C ÷ 0001 ÷	#  ÷ [0.2] COMMA (MidNum) ÷ [999.0] <START OF HEADING> (Other) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u002C\u0001",
+                     new String[] {  });
+
+    // ÷ 002C × 0308 ÷ 0001 ÷	#  ÷ [0.2] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] <START OF HEADING> (Other) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u002C\u0308\u0001",
+                     new String[] {  });
+
+    // ÷ 002C ÷ 000D ÷	#  ÷ [0.2] COMMA (MidNum) ÷ [3.11] <CARRIAGE RETURN (CR)> (CR) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u002C\r",
+                     new String[] {  });
+
+    // ÷ 002C × 0308 ÷ 000D ÷	#  ÷ [0.2] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.11] <CARRIAGE RETURN (CR)> (CR) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u002C\u0308\r",
+                     new String[] {  });
+
+    // ÷ 002C ÷ 000A ÷	#  ÷ [0.2] COMMA (MidNum) ÷ [3.11] <LINE FEED (LF)> (LF) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u002C\n",
+                     new String[] {  });
+
+    // ÷ 002C × 0308 ÷ 000A ÷	#  ÷ [0.2] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.11] <LINE FEED (LF)> (LF) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u002C\u0308\n",
+                     new String[] {  });
+
+    // ÷ 002C ÷ 000B ÷	#  ÷ [0.2] COMMA (MidNum) ÷ [3.11] <LINE TABULATION> (Newline) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u002C\u000B",
+                     new String[] {  });
+
+    // ÷ 002C × 0308 ÷ 000B ÷	#  ÷ [0.2] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.11] <LINE TABULATION> (Newline) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u002C\u0308\u000B",
+                     new String[] {  });
+
+    // ÷ 002C ÷ 3031 ÷	#  ÷ [0.2] COMMA (MidNum) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u002C\u3031",
+                     new String[] { "\u3031" });
+
+    // ÷ 002C × 0308 ÷ 3031 ÷	#  ÷ [0.2] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u002C\u0308\u3031",
+                     new String[] { "\u3031" });
+
+    // ÷ 002C ÷ 0041 ÷	#  ÷ [0.2] COMMA (MidNum) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u002C\u0041",
+                     new String[] { "\u0041" });
+
+    // ÷ 002C × 0308 ÷ 0041 ÷	#  ÷ [0.2] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u002C\u0308\u0041",
+                     new String[] { "\u0041" });
+
+    // ÷ 002C ÷ 003A ÷	#  ÷ [0.2] COMMA (MidNum) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u002C\u003A",
+                     new String[] {  });
+
+    // ÷ 002C × 0308 ÷ 003A ÷	#  ÷ [0.2] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u002C\u0308\u003A",
+                     new String[] {  });
+
+    // ÷ 002C ÷ 002C ÷	#  ÷ [0.2] COMMA (MidNum) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u002C\u002C",
+                     new String[] {  });
+
+    // ÷ 002C × 0308 ÷ 002C ÷	#  ÷ [0.2] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u002C\u0308\u002C",
+                     new String[] {  });
+
+    // ÷ 002C ÷ 0027 ÷	#  ÷ [0.2] COMMA (MidNum) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u002C\u0027",
+                     new String[] {  });
+
+    // ÷ 002C × 0308 ÷ 0027 ÷	#  ÷ [0.2] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u002C\u0308\u0027",
+                     new String[] {  });
+
+    // ÷ 002C ÷ 0030 ÷	#  ÷ [0.2] COMMA (MidNum) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u002C\u0030",
+                     new String[] { "\u0030" });
+
+    // ÷ 002C × 0308 ÷ 0030 ÷	#  ÷ [0.2] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u002C\u0308\u0030",
+                     new String[] { "\u0030" });
+
+    // ÷ 002C ÷ 005F ÷	#  ÷ [0.2] COMMA (MidNum) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u002C\u005F",
+                     new String[] {  });
+
+    // ÷ 002C × 0308 ÷ 005F ÷	#  ÷ [0.2] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u002C\u0308\u005F",
+                     new String[] {  });
+
+    // ÷ 002C × 00AD ÷	#  ÷ [0.2] COMMA (MidNum) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u002C\u00AD",
+                     new String[] {  });
+
+    // ÷ 002C × 0308 × 00AD ÷	#  ÷ [0.2] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u002C\u0308\u00AD",
+                     new String[] {  });
+
+    // ÷ 002C × 0300 ÷	#  ÷ [0.2] COMMA (MidNum) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u002C\u0300",
+                     new String[] {  });
+
+    // ÷ 002C × 0308 × 0300 ÷	#  ÷ [0.2] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u002C\u0308\u0300",
+                     new String[] {  });
+
+    // ÷ 002C ÷ 0061 × 2060 ÷	#  ÷ [0.2] COMMA (MidNum) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u002C\u0061\u2060",
+                     new String[] { "\u0061\u2060" });
+
+    // ÷ 002C × 0308 ÷ 0061 × 2060 ÷	#  ÷ [0.2] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u002C\u0308\u0061\u2060",
+                     new String[] { "\u0061\u2060" });
+
+    // ÷ 002C ÷ 0061 ÷ 003A ÷	#  ÷ [0.2] COMMA (MidNum) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u002C\u0061\u003A",
+                     new String[] { "\u0061" });
+
+    // ÷ 002C × 0308 ÷ 0061 ÷ 003A ÷	#  ÷ [0.2] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u002C\u0308\u0061\u003A",
+                     new String[] { "\u0061" });
+
+    // ÷ 002C ÷ 0061 ÷ 0027 ÷	#  ÷ [0.2] COMMA (MidNum) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u002C\u0061\u0027",
+                     new String[] { "\u0061" });
+
+    // ÷ 002C × 0308 ÷ 0061 ÷ 0027 ÷	#  ÷ [0.2] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u002C\u0308\u0061\u0027",
+                     new String[] { "\u0061" });
+
+    // ÷ 002C ÷ 0061 ÷ 0027 × 2060 ÷	#  ÷ [0.2] COMMA (MidNum) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u002C\u0061\u0027\u2060",
+                     new String[] { "\u0061" });
+
+    // ÷ 002C × 0308 ÷ 0061 ÷ 0027 × 2060 ÷	#  ÷ [0.2] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u002C\u0308\u0061\u0027\u2060",
+                     new String[] { "\u0061" });
+
+    // ÷ 002C ÷ 0061 ÷ 002C ÷	#  ÷ [0.2] COMMA (MidNum) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u002C\u0061\u002C",
+                     new String[] { "\u0061" });
+
+    // ÷ 002C × 0308 ÷ 0061 ÷ 002C ÷	#  ÷ [0.2] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u002C\u0308\u0061\u002C",
+                     new String[] { "\u0061" });
+
+    // ÷ 002C ÷ 0031 ÷ 003A ÷	#  ÷ [0.2] COMMA (MidNum) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u002C\u0031\u003A",
+                     new String[] { "\u0031" });
+
+    // ÷ 002C × 0308 ÷ 0031 ÷ 003A ÷	#  ÷ [0.2] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u002C\u0308\u0031\u003A",
+                     new String[] { "\u0031" });
+
+    // ÷ 002C ÷ 0031 ÷ 0027 ÷	#  ÷ [0.2] COMMA (MidNum) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u002C\u0031\u0027",
+                     new String[] { "\u0031" });
+
+    // ÷ 002C × 0308 ÷ 0031 ÷ 0027 ÷	#  ÷ [0.2] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u002C\u0308\u0031\u0027",
+                     new String[] { "\u0031" });
+
+    // ÷ 002C ÷ 0031 ÷ 002C ÷	#  ÷ [0.2] COMMA (MidNum) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u002C\u0031\u002C",
+                     new String[] { "\u0031" });
+
+    // ÷ 002C × 0308 ÷ 0031 ÷ 002C ÷	#  ÷ [0.2] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u002C\u0308\u0031\u002C",
+                     new String[] { "\u0031" });
+
+    // ÷ 002C ÷ 0031 ÷ 002E × 2060 ÷	#  ÷ [0.2] COMMA (MidNum) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u002C\u0031\u002E\u2060",
+                     new String[] { "\u0031" });
+
+    // ÷ 002C × 0308 ÷ 0031 ÷ 002E × 2060 ÷	#  ÷ [0.2] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u002C\u0308\u0031\u002E\u2060",
+                     new String[] { "\u0031" });
+
+    // ÷ 0027 ÷ 0001 ÷	#  ÷ [0.2] APOSTROPHE (MidNumLet) ÷ [999.0] <START OF HEADING> (Other) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0027\u0001",
+                     new String[] {  });
+
+    // ÷ 0027 × 0308 ÷ 0001 ÷	#  ÷ [0.2] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] <START OF HEADING> (Other) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0027\u0308\u0001",
+                     new String[] {  });
+
+    // ÷ 0027 ÷ 000D ÷	#  ÷ [0.2] APOSTROPHE (MidNumLet) ÷ [3.11] <CARRIAGE RETURN (CR)> (CR) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0027\r",
+                     new String[] {  });
+
+    // ÷ 0027 × 0308 ÷ 000D ÷	#  ÷ [0.2] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.11] <CARRIAGE RETURN (CR)> (CR) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0027\u0308\r",
+                     new String[] {  });
+
+    // ÷ 0027 ÷ 000A ÷	#  ÷ [0.2] APOSTROPHE (MidNumLet) ÷ [3.11] <LINE FEED (LF)> (LF) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0027\n",
+                     new String[] {  });
+
+    // ÷ 0027 × 0308 ÷ 000A ÷	#  ÷ [0.2] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.11] <LINE FEED (LF)> (LF) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0027\u0308\n",
+                     new String[] {  });
+
+    // ÷ 0027 ÷ 000B ÷	#  ÷ [0.2] APOSTROPHE (MidNumLet) ÷ [3.11] <LINE TABULATION> (Newline) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0027\u000B",
+                     new String[] {  });
+
+    // ÷ 0027 × 0308 ÷ 000B ÷	#  ÷ [0.2] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.11] <LINE TABULATION> (Newline) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0027\u0308\u000B",
+                     new String[] {  });
+
+    // ÷ 0027 ÷ 3031 ÷	#  ÷ [0.2] APOSTROPHE (MidNumLet) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0027\u3031",
+                     new String[] { "\u3031" });
+
+    // ÷ 0027 × 0308 ÷ 3031 ÷	#  ÷ [0.2] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0027\u0308\u3031",
+                     new String[] { "\u3031" });
+
+    // ÷ 0027 ÷ 0041 ÷	#  ÷ [0.2] APOSTROPHE (MidNumLet) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0027\u0041",
+                     new String[] { "\u0041" });
+
+    // ÷ 0027 × 0308 ÷ 0041 ÷	#  ÷ [0.2] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0027\u0308\u0041",
+                     new String[] { "\u0041" });
+
+    // ÷ 0027 ÷ 003A ÷	#  ÷ [0.2] APOSTROPHE (MidNumLet) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0027\u003A",
+                     new String[] {  });
+
+    // ÷ 0027 × 0308 ÷ 003A ÷	#  ÷ [0.2] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0027\u0308\u003A",
+                     new String[] {  });
+
+    // ÷ 0027 ÷ 002C ÷	#  ÷ [0.2] APOSTROPHE (MidNumLet) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0027\u002C",
+                     new String[] {  });
+
+    // ÷ 0027 × 0308 ÷ 002C ÷	#  ÷ [0.2] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0027\u0308\u002C",
+                     new String[] {  });
+
+    // ÷ 0027 ÷ 0027 ÷	#  ÷ [0.2] APOSTROPHE (MidNumLet) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0027\u0027",
+                     new String[] {  });
+
+    // ÷ 0027 × 0308 ÷ 0027 ÷	#  ÷ [0.2] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0027\u0308\u0027",
+                     new String[] {  });
+
+    // ÷ 0027 ÷ 0030 ÷	#  ÷ [0.2] APOSTROPHE (MidNumLet) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0027\u0030",
+                     new String[] { "\u0030" });
+
+    // ÷ 0027 × 0308 ÷ 0030 ÷	#  ÷ [0.2] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0027\u0308\u0030",
+                     new String[] { "\u0030" });
+
+    // ÷ 0027 ÷ 005F ÷	#  ÷ [0.2] APOSTROPHE (MidNumLet) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0027\u005F",
+                     new String[] {  });
+
+    // ÷ 0027 × 0308 ÷ 005F ÷	#  ÷ [0.2] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0027\u0308\u005F",
+                     new String[] {  });
+
+    // ÷ 0027 × 00AD ÷	#  ÷ [0.2] APOSTROPHE (MidNumLet) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0027\u00AD",
+                     new String[] {  });
+
+    // ÷ 0027 × 0308 × 00AD ÷	#  ÷ [0.2] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0027\u0308\u00AD",
+                     new String[] {  });
+
+    // ÷ 0027 × 0300 ÷	#  ÷ [0.2] APOSTROPHE (MidNumLet) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0027\u0300",
+                     new String[] {  });
+
+    // ÷ 0027 × 0308 × 0300 ÷	#  ÷ [0.2] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0027\u0308\u0300",
+                     new String[] {  });
+
+    // ÷ 0027 ÷ 0061 × 2060 ÷	#  ÷ [0.2] APOSTROPHE (MidNumLet) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0027\u0061\u2060",
+                     new String[] { "\u0061\u2060" });
+
+    // ÷ 0027 × 0308 ÷ 0061 × 2060 ÷	#  ÷ [0.2] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0027\u0308\u0061\u2060",
+                     new String[] { "\u0061\u2060" });
+
+    // ÷ 0027 ÷ 0061 ÷ 003A ÷	#  ÷ [0.2] APOSTROPHE (MidNumLet) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0027\u0061\u003A",
+                     new String[] { "\u0061" });
+
+    // ÷ 0027 × 0308 ÷ 0061 ÷ 003A ÷	#  ÷ [0.2] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0027\u0308\u0061\u003A",
+                     new String[] { "\u0061" });
+
+    // ÷ 0027 ÷ 0061 ÷ 0027 ÷	#  ÷ [0.2] APOSTROPHE (MidNumLet) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0027\u0061\u0027",
+                     new String[] { "\u0061" });
+
+    // ÷ 0027 × 0308 ÷ 0061 ÷ 0027 ÷	#  ÷ [0.2] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0027\u0308\u0061\u0027",
+                     new String[] { "\u0061" });
+
+    // ÷ 0027 ÷ 0061 ÷ 0027 × 2060 ÷	#  ÷ [0.2] APOSTROPHE (MidNumLet) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0027\u0061\u0027\u2060",
+                     new String[] { "\u0061" });
+
+    // ÷ 0027 × 0308 ÷ 0061 ÷ 0027 × 2060 ÷	#  ÷ [0.2] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0027\u0308\u0061\u0027\u2060",
+                     new String[] { "\u0061" });
+
+    // ÷ 0027 ÷ 0061 ÷ 002C ÷	#  ÷ [0.2] APOSTROPHE (MidNumLet) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0027\u0061\u002C",
+                     new String[] { "\u0061" });
+
+    // ÷ 0027 × 0308 ÷ 0061 ÷ 002C ÷	#  ÷ [0.2] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0027\u0308\u0061\u002C",
+                     new String[] { "\u0061" });
+
+    // ÷ 0027 ÷ 0031 ÷ 003A ÷	#  ÷ [0.2] APOSTROPHE (MidNumLet) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0027\u0031\u003A",
+                     new String[] { "\u0031" });
+
+    // ÷ 0027 × 0308 ÷ 0031 ÷ 003A ÷	#  ÷ [0.2] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0027\u0308\u0031\u003A",
+                     new String[] { "\u0031" });
+
+    // ÷ 0027 ÷ 0031 ÷ 0027 ÷	#  ÷ [0.2] APOSTROPHE (MidNumLet) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0027\u0031\u0027",
+                     new String[] { "\u0031" });
+
+    // ÷ 0027 × 0308 ÷ 0031 ÷ 0027 ÷	#  ÷ [0.2] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0027\u0308\u0031\u0027",
+                     new String[] { "\u0031" });
+
+    // ÷ 0027 ÷ 0031 ÷ 002C ÷	#  ÷ [0.2] APOSTROPHE (MidNumLet) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0027\u0031\u002C",
+                     new String[] { "\u0031" });
+
+    // ÷ 0027 × 0308 ÷ 0031 ÷ 002C ÷	#  ÷ [0.2] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0027\u0308\u0031\u002C",
+                     new String[] { "\u0031" });
+
+    // ÷ 0027 ÷ 0031 ÷ 002E × 2060 ÷	#  ÷ [0.2] APOSTROPHE (MidNumLet) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0027\u0031\u002E\u2060",
+                     new String[] { "\u0031" });
+
+    // ÷ 0027 × 0308 ÷ 0031 ÷ 002E × 2060 ÷	#  ÷ [0.2] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0027\u0308\u0031\u002E\u2060",
+                     new String[] { "\u0031" });
+
+    // ÷ 0030 ÷ 0001 ÷	#  ÷ [0.2] DIGIT ZERO (Numeric) ÷ [999.0] <START OF HEADING> (Other) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0030\u0001",
+                     new String[] { "\u0030" });
+
+    // ÷ 0030 × 0308 ÷ 0001 ÷	#  ÷ [0.2] DIGIT ZERO (Numeric) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] <START OF HEADING> (Other) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0030\u0308\u0001",
+                     new String[] { "\u0030\u0308" });
+
+    // ÷ 0030 ÷ 000D ÷	#  ÷ [0.2] DIGIT ZERO (Numeric) ÷ [3.11] <CARRIAGE RETURN (CR)> (CR) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0030\r",
+                     new String[] { "\u0030" });
+
+    // ÷ 0030 × 0308 ÷ 000D ÷	#  ÷ [0.2] DIGIT ZERO (Numeric) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.11] <CARRIAGE RETURN (CR)> (CR) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0030\u0308\r",
+                     new String[] { "\u0030\u0308" });
+
+    // ÷ 0030 ÷ 000A ÷	#  ÷ [0.2] DIGIT ZERO (Numeric) ÷ [3.11] <LINE FEED (LF)> (LF) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0030\n",
+                     new String[] { "\u0030" });
+
+    // ÷ 0030 × 0308 ÷ 000A ÷	#  ÷ [0.2] DIGIT ZERO (Numeric) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.11] <LINE FEED (LF)> (LF) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0030\u0308\n",
+                     new String[] { "\u0030\u0308" });
+
+    // ÷ 0030 ÷ 000B ÷	#  ÷ [0.2] DIGIT ZERO (Numeric) ÷ [3.11] <LINE TABULATION> (Newline) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0030\u000B",
+                     new String[] { "\u0030" });
+
+    // ÷ 0030 × 0308 ÷ 000B ÷	#  ÷ [0.2] DIGIT ZERO (Numeric) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.11] <LINE TABULATION> (Newline) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0030\u0308\u000B",
+                     new String[] { "\u0030\u0308" });
+
+    // ÷ 0030 ÷ 3031 ÷	#  ÷ [0.2] DIGIT ZERO (Numeric) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0030\u3031",
+                     new String[] { "\u0030", "\u3031" });
+
+    // ÷ 0030 × 0308 ÷ 3031 ÷	#  ÷ [0.2] DIGIT ZERO (Numeric) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0030\u0308\u3031",
+                     new String[] { "\u0030\u0308", "\u3031" });
+
+    // ÷ 0030 × 0041 ÷	#  ÷ [0.2] DIGIT ZERO (Numeric) × [10.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0030\u0041",
+                     new String[] { "\u0030\u0041" });
+
+    // ÷ 0030 × 0308 × 0041 ÷	#  ÷ [0.2] DIGIT ZERO (Numeric) × [4.0] COMBINING DIAERESIS (Extend_FE) × [10.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0030\u0308\u0041",
+                     new String[] { "\u0030\u0308\u0041" });
+
+    // ÷ 0030 ÷ 003A ÷	#  ÷ [0.2] DIGIT ZERO (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0030\u003A",
+                     new String[] { "\u0030" });
+
+    // ÷ 0030 × 0308 ÷ 003A ÷	#  ÷ [0.2] DIGIT ZERO (Numeric) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0030\u0308\u003A",
+                     new String[] { "\u0030\u0308" });
+
+    // ÷ 0030 ÷ 002C ÷	#  ÷ [0.2] DIGIT ZERO (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0030\u002C",
+                     new String[] { "\u0030" });
+
+    // ÷ 0030 × 0308 ÷ 002C ÷	#  ÷ [0.2] DIGIT ZERO (Numeric) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0030\u0308\u002C",
+                     new String[] { "\u0030\u0308" });
+
+    // ÷ 0030 ÷ 0027 ÷	#  ÷ [0.2] DIGIT ZERO (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0030\u0027",
+                     new String[] { "\u0030" });
+
+    // ÷ 0030 × 0308 ÷ 0027 ÷	#  ÷ [0.2] DIGIT ZERO (Numeric) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0030\u0308\u0027",
+                     new String[] { "\u0030\u0308" });
+
+    // ÷ 0030 × 0030 ÷	#  ÷ [0.2] DIGIT ZERO (Numeric) × [8.0] DIGIT ZERO (Numeric) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0030\u0030",
+                     new String[] { "\u0030\u0030" });
+
+    // ÷ 0030 × 0308 × 0030 ÷	#  ÷ [0.2] DIGIT ZERO (Numeric) × [4.0] COMBINING DIAERESIS (Extend_FE) × [8.0] DIGIT ZERO (Numeric) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0030\u0308\u0030",
+                     new String[] { "\u0030\u0308\u0030" });
+
+    // ÷ 0030 × 005F ÷	#  ÷ [0.2] DIGIT ZERO (Numeric) × [13.1] LOW LINE (ExtendNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0030\u005F",
+                     new String[] { "\u0030\u005F" });
+
+    // ÷ 0030 × 0308 × 005F ÷	#  ÷ [0.2] DIGIT ZERO (Numeric) × [4.0] COMBINING DIAERESIS (Extend_FE) × [13.1] LOW LINE (ExtendNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0030\u0308\u005F",
+                     new String[] { "\u0030\u0308\u005F" });
+
+    // ÷ 0030 × 00AD ÷	#  ÷ [0.2] DIGIT ZERO (Numeric) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0030\u00AD",
+                     new String[] { "\u0030\u00AD" });
+
+    // ÷ 0030 × 0308 × 00AD ÷	#  ÷ [0.2] DIGIT ZERO (Numeric) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0030\u0308\u00AD",
+                     new String[] { "\u0030\u0308\u00AD" });
+
+    // ÷ 0030 × 0300 ÷	#  ÷ [0.2] DIGIT ZERO (Numeric) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0030\u0300",
+                     new String[] { "\u0030\u0300" });
+
+    // ÷ 0030 × 0308 × 0300 ÷	#  ÷ [0.2] DIGIT ZERO (Numeric) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0030\u0308\u0300",
+                     new String[] { "\u0030\u0308\u0300" });
+
+    // ÷ 0030 × 0061 × 2060 ÷	#  ÷ [0.2] DIGIT ZERO (Numeric) × [10.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0030\u0061\u2060",
+                     new String[] { "\u0030\u0061\u2060" });
+
+    // ÷ 0030 × 0308 × 0061 × 2060 ÷	#  ÷ [0.2] DIGIT ZERO (Numeric) × [4.0] COMBINING DIAERESIS (Extend_FE) × [10.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0030\u0308\u0061\u2060",
+                     new String[] { "\u0030\u0308\u0061\u2060" });
+
+    // ÷ 0030 × 0061 ÷ 003A ÷	#  ÷ [0.2] DIGIT ZERO (Numeric) × [10.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0030\u0061\u003A",
+                     new String[] { "\u0030\u0061" });
+
+    // ÷ 0030 × 0308 × 0061 ÷ 003A ÷	#  ÷ [0.2] DIGIT ZERO (Numeric) × [4.0] COMBINING DIAERESIS (Extend_FE) × [10.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0030\u0308\u0061\u003A",
+                     new String[] { "\u0030\u0308\u0061" });
+
+    // ÷ 0030 × 0061 ÷ 0027 ÷	#  ÷ [0.2] DIGIT ZERO (Numeric) × [10.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0030\u0061\u0027",
+                     new String[] { "\u0030\u0061" });
+
+    // ÷ 0030 × 0308 × 0061 ÷ 0027 ÷	#  ÷ [0.2] DIGIT ZERO (Numeric) × [4.0] COMBINING DIAERESIS (Extend_FE) × [10.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0030\u0308\u0061\u0027",
+                     new String[] { "\u0030\u0308\u0061" });
+
+    // ÷ 0030 × 0061 ÷ 0027 × 2060 ÷	#  ÷ [0.2] DIGIT ZERO (Numeric) × [10.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0030\u0061\u0027\u2060",
+                     new String[] { "\u0030\u0061" });
+
+    // ÷ 0030 × 0308 × 0061 ÷ 0027 × 2060 ÷	#  ÷ [0.2] DIGIT ZERO (Numeric) × [4.0] COMBINING DIAERESIS (Extend_FE) × [10.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0030\u0308\u0061\u0027\u2060",
+                     new String[] { "\u0030\u0308\u0061" });
+
+    // ÷ 0030 × 0061 ÷ 002C ÷	#  ÷ [0.2] DIGIT ZERO (Numeric) × [10.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0030\u0061\u002C",
+                     new String[] { "\u0030\u0061" });
+
+    // ÷ 0030 × 0308 × 0061 ÷ 002C ÷	#  ÷ [0.2] DIGIT ZERO (Numeric) × [4.0] COMBINING DIAERESIS (Extend_FE) × [10.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0030\u0308\u0061\u002C",
+                     new String[] { "\u0030\u0308\u0061" });
+
+    // ÷ 0030 × 0031 ÷ 003A ÷	#  ÷ [0.2] DIGIT ZERO (Numeric) × [8.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0030\u0031\u003A",
+                     new String[] { "\u0030\u0031" });
+
+    // ÷ 0030 × 0308 × 0031 ÷ 003A ÷	#  ÷ [0.2] DIGIT ZERO (Numeric) × [4.0] COMBINING DIAERESIS (Extend_FE) × [8.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0030\u0308\u0031\u003A",
+                     new String[] { "\u0030\u0308\u0031" });
+
+    // ÷ 0030 × 0031 ÷ 0027 ÷	#  ÷ [0.2] DIGIT ZERO (Numeric) × [8.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0030\u0031\u0027",
+                     new String[] { "\u0030\u0031" });
+
+    // ÷ 0030 × 0308 × 0031 ÷ 0027 ÷	#  ÷ [0.2] DIGIT ZERO (Numeric) × [4.0] COMBINING DIAERESIS (Extend_FE) × [8.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0030\u0308\u0031\u0027",
+                     new String[] { "\u0030\u0308\u0031" });
+
+    // ÷ 0030 × 0031 ÷ 002C ÷	#  ÷ [0.2] DIGIT ZERO (Numeric) × [8.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0030\u0031\u002C",
+                     new String[] { "\u0030\u0031" });
+
+    // ÷ 0030 × 0308 × 0031 ÷ 002C ÷	#  ÷ [0.2] DIGIT ZERO (Numeric) × [4.0] COMBINING DIAERESIS (Extend_FE) × [8.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0030\u0308\u0031\u002C",
+                     new String[] { "\u0030\u0308\u0031" });
+
+    // ÷ 0030 × 0031 ÷ 002E × 2060 ÷	#  ÷ [0.2] DIGIT ZERO (Numeric) × [8.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0030\u0031\u002E\u2060",
+                     new String[] { "\u0030\u0031" });
+
+    // ÷ 0030 × 0308 × 0031 ÷ 002E × 2060 ÷	#  ÷ [0.2] DIGIT ZERO (Numeric) × [4.0] COMBINING DIAERESIS (Extend_FE) × [8.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0030\u0308\u0031\u002E\u2060",
+                     new String[] { "\u0030\u0308\u0031" });
+
+    // ÷ 005F ÷ 0001 ÷	#  ÷ [0.2] LOW LINE (ExtendNumLet) ÷ [999.0] <START OF HEADING> (Other) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u005F\u0001",
+                     new String[] {  });
+
+    // ÷ 005F × 0308 ÷ 0001 ÷	#  ÷ [0.2] LOW LINE (ExtendNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] <START OF HEADING> (Other) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u005F\u0308\u0001",
+                     new String[] {  });
+
+    // ÷ 005F ÷ 000D ÷	#  ÷ [0.2] LOW LINE (ExtendNumLet) ÷ [3.11] <CARRIAGE RETURN (CR)> (CR) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u005F\r",
+                     new String[] {  });
+
+    // ÷ 005F × 0308 ÷ 000D ÷	#  ÷ [0.2] LOW LINE (ExtendNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.11] <CARRIAGE RETURN (CR)> (CR) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u005F\u0308\r",
+                     new String[] {  });
+
+    // ÷ 005F ÷ 000A ÷	#  ÷ [0.2] LOW LINE (ExtendNumLet) ÷ [3.11] <LINE FEED (LF)> (LF) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u005F\n",
+                     new String[] {  });
+
+    // ÷ 005F × 0308 ÷ 000A ÷	#  ÷ [0.2] LOW LINE (ExtendNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.11] <LINE FEED (LF)> (LF) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u005F\u0308\n",
+                     new String[] {  });
+
+    // ÷ 005F ÷ 000B ÷	#  ÷ [0.2] LOW LINE (ExtendNumLet) ÷ [3.11] <LINE TABULATION> (Newline) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u005F\u000B",
+                     new String[] {  });
+
+    // ÷ 005F × 0308 ÷ 000B ÷	#  ÷ [0.2] LOW LINE (ExtendNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.11] <LINE TABULATION> (Newline) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u005F\u0308\u000B",
+                     new String[] {  });
+
+    // ÷ 005F × 3031 ÷	#  ÷ [0.2] LOW LINE (ExtendNumLet) × [13.2] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u005F\u3031",
+                     new String[] { "\u005F\u3031" });
+
+    // ÷ 005F × 0308 × 3031 ÷	#  ÷ [0.2] LOW LINE (ExtendNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [13.2] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u005F\u0308\u3031",
+                     new String[] { "\u005F\u0308\u3031" });
+
+    // ÷ 005F × 0041 ÷	#  ÷ [0.2] LOW LINE (ExtendNumLet) × [13.2] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u005F\u0041",
+                     new String[] { "\u005F\u0041" });
+
+    // ÷ 005F × 0308 × 0041 ÷	#  ÷ [0.2] LOW LINE (ExtendNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [13.2] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u005F\u0308\u0041",
+                     new String[] { "\u005F\u0308\u0041" });
+
+    // ÷ 005F ÷ 003A ÷	#  ÷ [0.2] LOW LINE (ExtendNumLet) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u005F\u003A",
+                     new String[] {  });
+
+    // ÷ 005F × 0308 ÷ 003A ÷	#  ÷ [0.2] LOW LINE (ExtendNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u005F\u0308\u003A",
+                     new String[] {  });
+
+    // ÷ 005F ÷ 002C ÷	#  ÷ [0.2] LOW LINE (ExtendNumLet) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u005F\u002C",
+                     new String[] {  });
+
+    // ÷ 005F × 0308 ÷ 002C ÷	#  ÷ [0.2] LOW LINE (ExtendNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u005F\u0308\u002C",
+                     new String[] {  });
+
+    // ÷ 005F ÷ 0027 ÷	#  ÷ [0.2] LOW LINE (ExtendNumLet) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u005F\u0027",
+                     new String[] {  });
+
+    // ÷ 005F × 0308 ÷ 0027 ÷	#  ÷ [0.2] LOW LINE (ExtendNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u005F\u0308\u0027",
+                     new String[] {  });
+
+    // ÷ 005F × 0030 ÷	#  ÷ [0.2] LOW LINE (ExtendNumLet) × [13.2] DIGIT ZERO (Numeric) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u005F\u0030",
+                     new String[] { "\u005F\u0030" });
+
+    // ÷ 005F × 0308 × 0030 ÷	#  ÷ [0.2] LOW LINE (ExtendNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [13.2] DIGIT ZERO (Numeric) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u005F\u0308\u0030",
+                     new String[] { "\u005F\u0308\u0030" });
+
+    // ÷ 005F × 005F ÷	#  ÷ [0.2] LOW LINE (ExtendNumLet) × [13.1] LOW LINE (ExtendNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u005F\u005F",
+                     new String[] {  });
+
+    // ÷ 005F × 0308 × 005F ÷	#  ÷ [0.2] LOW LINE (ExtendNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [13.1] LOW LINE (ExtendNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u005F\u0308\u005F",
+                     new String[] {  });
+
+    // ÷ 005F × 00AD ÷	#  ÷ [0.2] LOW LINE (ExtendNumLet) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u005F\u00AD",
+                     new String[] {  });
+
+    // ÷ 005F × 0308 × 00AD ÷	#  ÷ [0.2] LOW LINE (ExtendNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u005F\u0308\u00AD",
+                     new String[] {  });
+
+    // ÷ 005F × 0300 ÷	#  ÷ [0.2] LOW LINE (ExtendNumLet) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u005F\u0300",
+                     new String[] {  });
+
+    // ÷ 005F × 0308 × 0300 ÷	#  ÷ [0.2] LOW LINE (ExtendNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u005F\u0308\u0300",
+                     new String[] {  });
+
+    // ÷ 005F × 0061 × 2060 ÷	#  ÷ [0.2] LOW LINE (ExtendNumLet) × [13.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u005F\u0061\u2060",
+                     new String[] { "\u005F\u0061\u2060" });
+
+    // ÷ 005F × 0308 × 0061 × 2060 ÷	#  ÷ [0.2] LOW LINE (ExtendNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [13.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u005F\u0308\u0061\u2060",
+                     new String[] { "\u005F\u0308\u0061\u2060" });
+
+    // ÷ 005F × 0061 ÷ 003A ÷	#  ÷ [0.2] LOW LINE (ExtendNumLet) × [13.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u005F\u0061\u003A",
+                     new String[] { "\u005F\u0061" });
+
+    // ÷ 005F × 0308 × 0061 ÷ 003A ÷	#  ÷ [0.2] LOW LINE (ExtendNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [13.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u005F\u0308\u0061\u003A",
+                     new String[] { "\u005F\u0308\u0061" });
+
+    // ÷ 005F × 0061 ÷ 0027 ÷	#  ÷ [0.2] LOW LINE (ExtendNumLet) × [13.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u005F\u0061\u0027",
+                     new String[] { "\u005F\u0061" });
+
+    // ÷ 005F × 0308 × 0061 ÷ 0027 ÷	#  ÷ [0.2] LOW LINE (ExtendNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [13.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u005F\u0308\u0061\u0027",
+                     new String[] { "\u005F\u0308\u0061" });
+
+    // ÷ 005F × 0061 ÷ 0027 × 2060 ÷	#  ÷ [0.2] LOW LINE (ExtendNumLet) × [13.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u005F\u0061\u0027\u2060",
+                     new String[] { "\u005F\u0061" });
+
+    // ÷ 005F × 0308 × 0061 ÷ 0027 × 2060 ÷	#  ÷ [0.2] LOW LINE (ExtendNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [13.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u005F\u0308\u0061\u0027\u2060",
+                     new String[] { "\u005F\u0308\u0061" });
+
+    // ÷ 005F × 0061 ÷ 002C ÷	#  ÷ [0.2] LOW LINE (ExtendNumLet) × [13.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u005F\u0061\u002C",
+                     new String[] { "\u005F\u0061" });
+
+    // ÷ 005F × 0308 × 0061 ÷ 002C ÷	#  ÷ [0.2] LOW LINE (ExtendNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [13.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u005F\u0308\u0061\u002C",
+                     new String[] { "\u005F\u0308\u0061" });
+
+    // ÷ 005F × 0031 ÷ 003A ÷	#  ÷ [0.2] LOW LINE (ExtendNumLet) × [13.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u005F\u0031\u003A",
+                     new String[] { "\u005F\u0031" });
+
+    // ÷ 005F × 0308 × 0031 ÷ 003A ÷	#  ÷ [0.2] LOW LINE (ExtendNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [13.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u005F\u0308\u0031\u003A",
+                     new String[] { "\u005F\u0308\u0031" });
+
+    // ÷ 005F × 0031 ÷ 0027 ÷	#  ÷ [0.2] LOW LINE (ExtendNumLet) × [13.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u005F\u0031\u0027",
+                     new String[] { "\u005F\u0031" });
+
+    // ÷ 005F × 0308 × 0031 ÷ 0027 ÷	#  ÷ [0.2] LOW LINE (ExtendNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [13.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u005F\u0308\u0031\u0027",
+                     new String[] { "\u005F\u0308\u0031" });
+
+    // ÷ 005F × 0031 ÷ 002C ÷	#  ÷ [0.2] LOW LINE (ExtendNumLet) × [13.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u005F\u0031\u002C",
+                     new String[] { "\u005F\u0031" });
+
+    // ÷ 005F × 0308 × 0031 ÷ 002C ÷	#  ÷ [0.2] LOW LINE (ExtendNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [13.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u005F\u0308\u0031\u002C",
+                     new String[] { "\u005F\u0308\u0031" });
+
+    // ÷ 005F × 0031 ÷ 002E × 2060 ÷	#  ÷ [0.2] LOW LINE (ExtendNumLet) × [13.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u005F\u0031\u002E\u2060",
+                     new String[] { "\u005F\u0031" });
+
+    // ÷ 005F × 0308 × 0031 ÷ 002E × 2060 ÷	#  ÷ [0.2] LOW LINE (ExtendNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [13.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u005F\u0308\u0031\u002E\u2060",
+                     new String[] { "\u005F\u0308\u0031" });
+
+    // ÷ 00AD ÷ 0001 ÷	#  ÷ [0.2] SOFT HYPHEN (Format_FE) ÷ [999.0] <START OF HEADING> (Other) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u00AD\u0001",
+                     new String[] {  });
+
+    // ÷ 00AD × 0308 ÷ 0001 ÷	#  ÷ [0.2] SOFT HYPHEN (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] <START OF HEADING> (Other) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u00AD\u0308\u0001",
+                     new String[] {  });
+
+    // ÷ 00AD ÷ 000D ÷	#  ÷ [0.2] SOFT HYPHEN (Format_FE) ÷ [3.11] <CARRIAGE RETURN (CR)> (CR) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u00AD\r",
+                     new String[] {  });
+
+    // ÷ 00AD × 0308 ÷ 000D ÷	#  ÷ [0.2] SOFT HYPHEN (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.11] <CARRIAGE RETURN (CR)> (CR) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u00AD\u0308\r",
+                     new String[] {  });
+
+    // ÷ 00AD ÷ 000A ÷	#  ÷ [0.2] SOFT HYPHEN (Format_FE) ÷ [3.11] <LINE FEED (LF)> (LF) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u00AD\n",
+                     new String[] {  });
+
+    // ÷ 00AD × 0308 ÷ 000A ÷	#  ÷ [0.2] SOFT HYPHEN (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.11] <LINE FEED (LF)> (LF) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u00AD\u0308\n",
+                     new String[] {  });
+
+    // ÷ 00AD ÷ 000B ÷	#  ÷ [0.2] SOFT HYPHEN (Format_FE) ÷ [3.11] <LINE TABULATION> (Newline) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u00AD\u000B",
+                     new String[] {  });
+
+    // ÷ 00AD × 0308 ÷ 000B ÷	#  ÷ [0.2] SOFT HYPHEN (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.11] <LINE TABULATION> (Newline) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u00AD\u0308\u000B",
+                     new String[] {  });
+
+    // ÷ 00AD ÷ 3031 ÷	#  ÷ [0.2] SOFT HYPHEN (Format_FE) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u00AD\u3031",
+                     new String[] { "\u3031" });
+
+    // ÷ 00AD × 0308 ÷ 3031 ÷	#  ÷ [0.2] SOFT HYPHEN (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u00AD\u0308\u3031",
+                     new String[] { "\u3031" });
+
+    // ÷ 00AD ÷ 0041 ÷	#  ÷ [0.2] SOFT HYPHEN (Format_FE) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u00AD\u0041",
+                     new String[] { "\u0041" });
+
+    // ÷ 00AD × 0308 ÷ 0041 ÷	#  ÷ [0.2] SOFT HYPHEN (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u00AD\u0308\u0041",
+                     new String[] { "\u0041" });
+
+    // ÷ 00AD ÷ 003A ÷	#  ÷ [0.2] SOFT HYPHEN (Format_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u00AD\u003A",
+                     new String[] {  });
+
+    // ÷ 00AD × 0308 ÷ 003A ÷	#  ÷ [0.2] SOFT HYPHEN (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u00AD\u0308\u003A",
+                     new String[] {  });
+
+    // ÷ 00AD ÷ 002C ÷	#  ÷ [0.2] SOFT HYPHEN (Format_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u00AD\u002C",
+                     new String[] {  });
+
+    // ÷ 00AD × 0308 ÷ 002C ÷	#  ÷ [0.2] SOFT HYPHEN (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u00AD\u0308\u002C",
+                     new String[] {  });
+
+    // ÷ 00AD ÷ 0027 ÷	#  ÷ [0.2] SOFT HYPHEN (Format_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u00AD\u0027",
+                     new String[] {  });
+
+    // ÷ 00AD × 0308 ÷ 0027 ÷	#  ÷ [0.2] SOFT HYPHEN (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u00AD\u0308\u0027",
+                     new String[] {  });
+
+    // ÷ 00AD ÷ 0030 ÷	#  ÷ [0.2] SOFT HYPHEN (Format_FE) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u00AD\u0030",
+                     new String[] { "\u0030" });
+
+    // ÷ 00AD × 0308 ÷ 0030 ÷	#  ÷ [0.2] SOFT HYPHEN (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u00AD\u0308\u0030",
+                     new String[] { "\u0030" });
+
+    // ÷ 00AD ÷ 005F ÷	#  ÷ [0.2] SOFT HYPHEN (Format_FE) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u00AD\u005F",
+                     new String[] {  });
+
+    // ÷ 00AD × 0308 ÷ 005F ÷	#  ÷ [0.2] SOFT HYPHEN (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u00AD\u0308\u005F",
+                     new String[] {  });
+
+    // ÷ 00AD × 00AD ÷	#  ÷ [0.2] SOFT HYPHEN (Format_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u00AD\u00AD",
+                     new String[] {  });
+
+    // ÷ 00AD × 0308 × 00AD ÷	#  ÷ [0.2] SOFT HYPHEN (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u00AD\u0308\u00AD",
+                     new String[] {  });
+
+    // ÷ 00AD × 0300 ÷	#  ÷ [0.2] SOFT HYPHEN (Format_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u00AD\u0300",
+                     new String[] {  });
+
+    // ÷ 00AD × 0308 × 0300 ÷	#  ÷ [0.2] SOFT HYPHEN (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u00AD\u0308\u0300",
+                     new String[] {  });
+
+    // ÷ 00AD ÷ 0061 × 2060 ÷	#  ÷ [0.2] SOFT HYPHEN (Format_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u00AD\u0061\u2060",
+                     new String[] { "\u0061\u2060" });
+
+    // ÷ 00AD × 0308 ÷ 0061 × 2060 ÷	#  ÷ [0.2] SOFT HYPHEN (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u00AD\u0308\u0061\u2060",
+                     new String[] { "\u0061\u2060" });
+
+    // ÷ 00AD ÷ 0061 ÷ 003A ÷	#  ÷ [0.2] SOFT HYPHEN (Format_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u00AD\u0061\u003A",
+                     new String[] { "\u0061" });
+
+    // ÷ 00AD × 0308 ÷ 0061 ÷ 003A ÷	#  ÷ [0.2] SOFT HYPHEN (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u00AD\u0308\u0061\u003A",
+                     new String[] { "\u0061" });
+
+    // ÷ 00AD ÷ 0061 ÷ 0027 ÷	#  ÷ [0.2] SOFT HYPHEN (Format_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u00AD\u0061\u0027",
+                     new String[] { "\u0061" });
+
+    // ÷ 00AD × 0308 ÷ 0061 ÷ 0027 ÷	#  ÷ [0.2] SOFT HYPHEN (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u00AD\u0308\u0061\u0027",
+                     new String[] { "\u0061" });
+
+    // ÷ 00AD ÷ 0061 ÷ 0027 × 2060 ÷	#  ÷ [0.2] SOFT HYPHEN (Format_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u00AD\u0061\u0027\u2060",
+                     new String[] { "\u0061" });
+
+    // ÷ 00AD × 0308 ÷ 0061 ÷ 0027 × 2060 ÷	#  ÷ [0.2] SOFT HYPHEN (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u00AD\u0308\u0061\u0027\u2060",
+                     new String[] { "\u0061" });
+
+    // ÷ 00AD ÷ 0061 ÷ 002C ÷	#  ÷ [0.2] SOFT HYPHEN (Format_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u00AD\u0061\u002C",
+                     new String[] { "\u0061" });
+
+    // ÷ 00AD × 0308 ÷ 0061 ÷ 002C ÷	#  ÷ [0.2] SOFT HYPHEN (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u00AD\u0308\u0061\u002C",
+                     new String[] { "\u0061" });
+
+    // ÷ 00AD ÷ 0031 ÷ 003A ÷	#  ÷ [0.2] SOFT HYPHEN (Format_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u00AD\u0031\u003A",
+                     new String[] { "\u0031" });
+
+    // ÷ 00AD × 0308 ÷ 0031 ÷ 003A ÷	#  ÷ [0.2] SOFT HYPHEN (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u00AD\u0308\u0031\u003A",
+                     new String[] { "\u0031" });
+
+    // ÷ 00AD ÷ 0031 ÷ 0027 ÷	#  ÷ [0.2] SOFT HYPHEN (Format_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u00AD\u0031\u0027",
+                     new String[] { "\u0031" });
+
+    // ÷ 00AD × 0308 ÷ 0031 ÷ 0027 ÷	#  ÷ [0.2] SOFT HYPHEN (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u00AD\u0308\u0031\u0027",
+                     new String[] { "\u0031" });
+
+    // ÷ 00AD ÷ 0031 ÷ 002C ÷	#  ÷ [0.2] SOFT HYPHEN (Format_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u00AD\u0031\u002C",
+                     new String[] { "\u0031" });
+
+    // ÷ 00AD × 0308 ÷ 0031 ÷ 002C ÷	#  ÷ [0.2] SOFT HYPHEN (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u00AD\u0308\u0031\u002C",
+                     new String[] { "\u0031" });
+
+    // ÷ 00AD ÷ 0031 ÷ 002E × 2060 ÷	#  ÷ [0.2] SOFT HYPHEN (Format_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u00AD\u0031\u002E\u2060",
+                     new String[] { "\u0031" });
+
+    // ÷ 00AD × 0308 ÷ 0031 ÷ 002E × 2060 ÷	#  ÷ [0.2] SOFT HYPHEN (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u00AD\u0308\u0031\u002E\u2060",
+                     new String[] { "\u0031" });
+
+    // ÷ 0300 ÷ 0001 ÷	#  ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) ÷ [999.0] <START OF HEADING> (Other) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0300\u0001",
+                     new String[] {  });
+
+    // ÷ 0300 × 0308 ÷ 0001 ÷	#  ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] <START OF HEADING> (Other) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0300\u0308\u0001",
+                     new String[] {  });
+
+    // ÷ 0300 ÷ 000D ÷	#  ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) ÷ [3.11] <CARRIAGE RETURN (CR)> (CR) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0300\r",
+                     new String[] {  });
+
+    // ÷ 0300 × 0308 ÷ 000D ÷	#  ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.11] <CARRIAGE RETURN (CR)> (CR) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0300\u0308\r",
+                     new String[] {  });
+
+    // ÷ 0300 ÷ 000A ÷	#  ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) ÷ [3.11] <LINE FEED (LF)> (LF) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0300\n",
+                     new String[] {  });
+
+    // ÷ 0300 × 0308 ÷ 000A ÷	#  ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.11] <LINE FEED (LF)> (LF) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0300\u0308\n",
+                     new String[] {  });
+
+    // ÷ 0300 ÷ 000B ÷	#  ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) ÷ [3.11] <LINE TABULATION> (Newline) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0300\u000B",
+                     new String[] {  });
+
+    // ÷ 0300 × 0308 ÷ 000B ÷	#  ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.11] <LINE TABULATION> (Newline) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0300\u0308\u000B",
+                     new String[] {  });
+
+    // ÷ 0300 ÷ 3031 ÷	#  ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0300\u3031",
+                     new String[] { "\u3031" });
+
+    // ÷ 0300 × 0308 ÷ 3031 ÷	#  ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0300\u0308\u3031",
+                     new String[] { "\u3031" });
+
+    // ÷ 0300 ÷ 0041 ÷	#  ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0300\u0041",
+                     new String[] { "\u0041" });
+
+    // ÷ 0300 × 0308 ÷ 0041 ÷	#  ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0300\u0308\u0041",
+                     new String[] { "\u0041" });
+
+    // ÷ 0300 ÷ 003A ÷	#  ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0300\u003A",
+                     new String[] {  });
+
+    // ÷ 0300 × 0308 ÷ 003A ÷	#  ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0300\u0308\u003A",
+                     new String[] {  });
+
+    // ÷ 0300 ÷ 002C ÷	#  ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0300\u002C",
+                     new String[] {  });
+
+    // ÷ 0300 × 0308 ÷ 002C ÷	#  ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0300\u0308\u002C",
+                     new String[] {  });
+
+    // ÷ 0300 ÷ 0027 ÷	#  ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0300\u0027",
+                     new String[] {  });
+
+    // ÷ 0300 × 0308 ÷ 0027 ÷	#  ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0300\u0308\u0027",
+                     new String[] {  });
+
+    // ÷ 0300 ÷ 0030 ÷	#  ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0300\u0030",
+                     new String[] { "\u0030" });
+
+    // ÷ 0300 × 0308 ÷ 0030 ÷	#  ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0300\u0308\u0030",
+                     new String[] { "\u0030" });
+
+    // ÷ 0300 ÷ 005F ÷	#  ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0300\u005F",
+                     new String[] {  });
+
+    // ÷ 0300 × 0308 ÷ 005F ÷	#  ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0300\u0308\u005F",
+                     new String[] {  });
+
+    // ÷ 0300 × 00AD ÷	#  ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0300\u00AD",
+                     new String[] {  });
+
+    // ÷ 0300 × 0308 × 00AD ÷	#  ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0300\u0308\u00AD",
+                     new String[] {  });
+
+    // ÷ 0300 × 0300 ÷	#  ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0300\u0300",
+                     new String[] {  });
+
+    // ÷ 0300 × 0308 × 0300 ÷	#  ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0300\u0308\u0300",
+                     new String[] {  });
+
+    // ÷ 0300 ÷ 0061 × 2060 ÷	#  ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0300\u0061\u2060",
+                     new String[] { "\u0061\u2060" });
+
+    // ÷ 0300 × 0308 ÷ 0061 × 2060 ÷	#  ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0300\u0308\u0061\u2060",
+                     new String[] { "\u0061\u2060" });
+
+    // ÷ 0300 ÷ 0061 ÷ 003A ÷	#  ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0300\u0061\u003A",
+                     new String[] { "\u0061" });
+
+    // ÷ 0300 × 0308 ÷ 0061 ÷ 003A ÷	#  ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0300\u0308\u0061\u003A",
+                     new String[] { "\u0061" });
+
+    // ÷ 0300 ÷ 0061 ÷ 0027 ÷	#  ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0300\u0061\u0027",
+                     new String[] { "\u0061" });
+
+    // ÷ 0300 × 0308 ÷ 0061 ÷ 0027 ÷	#  ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0300\u0308\u0061\u0027",
+                     new String[] { "\u0061" });
+
+    // ÷ 0300 ÷ 0061 ÷ 0027 × 2060 ÷	#  ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0300\u0061\u0027\u2060",
+                     new String[] { "\u0061" });
+
+    // ÷ 0300 × 0308 ÷ 0061 ÷ 0027 × 2060 ÷	#  ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0300\u0308\u0061\u0027\u2060",
+                     new String[] { "\u0061" });
+
+    // ÷ 0300 ÷ 0061 ÷ 002C ÷	#  ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0300\u0061\u002C",
+                     new String[] { "\u0061" });
+
+    // ÷ 0300 × 0308 ÷ 0061 ÷ 002C ÷	#  ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0300\u0308\u0061\u002C",
+                     new String[] { "\u0061" });
+
+    // ÷ 0300 ÷ 0031 ÷ 003A ÷	#  ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0300\u0031\u003A",
+                     new String[] { "\u0031" });
+
+    // ÷ 0300 × 0308 ÷ 0031 ÷ 003A ÷	#  ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0300\u0308\u0031\u003A",
+                     new String[] { "\u0031" });
+
+    // ÷ 0300 ÷ 0031 ÷ 0027 ÷	#  ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0300\u0031\u0027",
+                     new String[] { "\u0031" });
+
+    // ÷ 0300 × 0308 ÷ 0031 ÷ 0027 ÷	#  ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0300\u0308\u0031\u0027",
+                     new String[] { "\u0031" });
+
+    // ÷ 0300 ÷ 0031 ÷ 002C ÷	#  ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0300\u0031\u002C",
+                     new String[] { "\u0031" });
+
+    // ÷ 0300 × 0308 ÷ 0031 ÷ 002C ÷	#  ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0300\u0308\u0031\u002C",
+                     new String[] { "\u0031" });
+
+    // ÷ 0300 ÷ 0031 ÷ 002E × 2060 ÷	#  ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0300\u0031\u002E\u2060",
+                     new String[] { "\u0031" });
+
+    // ÷ 0300 × 0308 ÷ 0031 ÷ 002E × 2060 ÷	#  ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0300\u0308\u0031\u002E\u2060",
+                     new String[] { "\u0031" });
+
+    // ÷ 0061 × 2060 ÷ 0001 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] <START OF HEADING> (Other) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u2060\u0001",
+                     new String[] { "\u0061\u2060" });
+
+    // ÷ 0061 × 2060 × 0308 ÷ 0001 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] <START OF HEADING> (Other) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u2060\u0308\u0001",
+                     new String[] { "\u0061\u2060\u0308" });
+
+    // ÷ 0061 × 2060 ÷ 000D ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [3.11] <CARRIAGE RETURN (CR)> (CR) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u2060\r",
+                     new String[] { "\u0061\u2060" });
+
+    // ÷ 0061 × 2060 × 0308 ÷ 000D ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.11] <CARRIAGE RETURN (CR)> (CR) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u2060\u0308\r",
+                     new String[] { "\u0061\u2060\u0308" });
+
+    // ÷ 0061 × 2060 ÷ 000A ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [3.11] <LINE FEED (LF)> (LF) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u2060\n",
+                     new String[] { "\u0061\u2060" });
+
+    // ÷ 0061 × 2060 × 0308 ÷ 000A ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.11] <LINE FEED (LF)> (LF) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u2060\u0308\n",
+                     new String[] { "\u0061\u2060\u0308" });
+
+    // ÷ 0061 × 2060 ÷ 000B ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [3.11] <LINE TABULATION> (Newline) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u2060\u000B",
+                     new String[] { "\u0061\u2060" });
+
+    // ÷ 0061 × 2060 × 0308 ÷ 000B ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.11] <LINE TABULATION> (Newline) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u2060\u0308\u000B",
+                     new String[] { "\u0061\u2060\u0308" });
+
+    // ÷ 0061 × 2060 ÷ 3031 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u2060\u3031",
+                     new String[] { "\u0061\u2060", "\u3031" });
+
+    // ÷ 0061 × 2060 × 0308 ÷ 3031 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u2060\u0308\u3031",
+                     new String[] { "\u0061\u2060\u0308", "\u3031" });
+
+    // ÷ 0061 × 2060 × 0041 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [5.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u2060\u0041",
+                     new String[] { "\u0061\u2060\u0041" });
+
+    // ÷ 0061 × 2060 × 0308 × 0041 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [5.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u2060\u0308\u0041",
+                     new String[] { "\u0061\u2060\u0308\u0041" });
+
+    // ÷ 0061 × 2060 ÷ 003A ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u2060\u003A",
+                     new String[] { "\u0061\u2060" });
+
+    // ÷ 0061 × 2060 × 0308 ÷ 003A ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u2060\u0308\u003A",
+                     new String[] { "\u0061\u2060\u0308" });
+
+    // ÷ 0061 × 2060 ÷ 002C ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u2060\u002C",
+                     new String[] { "\u0061\u2060" });
+
+    // ÷ 0061 × 2060 × 0308 ÷ 002C ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u2060\u0308\u002C",
+                     new String[] { "\u0061\u2060\u0308" });
+
+    // ÷ 0061 × 2060 ÷ 0027 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u2060\u0027",
+                     new String[] { "\u0061\u2060" });
+
+    // ÷ 0061 × 2060 × 0308 ÷ 0027 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u2060\u0308\u0027",
+                     new String[] { "\u0061\u2060\u0308" });
+
+    // ÷ 0061 × 2060 × 0030 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [9.0] DIGIT ZERO (Numeric) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u2060\u0030",
+                     new String[] { "\u0061\u2060\u0030" });
+
+    // ÷ 0061 × 2060 × 0308 × 0030 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [9.0] DIGIT ZERO (Numeric) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u2060\u0308\u0030",
+                     new String[] { "\u0061\u2060\u0308\u0030" });
+
+    // ÷ 0061 × 2060 × 005F ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [13.1] LOW LINE (ExtendNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u2060\u005F",
+                     new String[] { "\u0061\u2060\u005F" });
+
+    // ÷ 0061 × 2060 × 0308 × 005F ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [13.1] LOW LINE (ExtendNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u2060\u0308\u005F",
+                     new String[] { "\u0061\u2060\u0308\u005F" });
+
+    // ÷ 0061 × 2060 × 00AD ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u2060\u00AD",
+                     new String[] { "\u0061\u2060\u00AD" });
+
+    // ÷ 0061 × 2060 × 0308 × 00AD ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u2060\u0308\u00AD",
+                     new String[] { "\u0061\u2060\u0308\u00AD" });
+
+    // ÷ 0061 × 2060 × 0300 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u2060\u0300",
+                     new String[] { "\u0061\u2060\u0300" });
+
+    // ÷ 0061 × 2060 × 0308 × 0300 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u2060\u0308\u0300",
+                     new String[] { "\u0061\u2060\u0308\u0300" });
+
+    // ÷ 0061 × 2060 × 0061 × 2060 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [5.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u2060\u0061\u2060",
+                     new String[] { "\u0061\u2060\u0061\u2060" });
+
+    // ÷ 0061 × 2060 × 0308 × 0061 × 2060 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [5.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u2060\u0308\u0061\u2060",
+                     new String[] { "\u0061\u2060\u0308\u0061\u2060" });
+
+    // ÷ 0061 × 2060 × 0061 ÷ 003A ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [5.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u2060\u0061\u003A",
+                     new String[] { "\u0061\u2060\u0061" });
+
+    // ÷ 0061 × 2060 × 0308 × 0061 ÷ 003A ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [5.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u2060\u0308\u0061\u003A",
+                     new String[] { "\u0061\u2060\u0308\u0061" });
+
+    // ÷ 0061 × 2060 × 0061 ÷ 0027 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [5.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u2060\u0061\u0027",
+                     new String[] { "\u0061\u2060\u0061" });
+
+    // ÷ 0061 × 2060 × 0308 × 0061 ÷ 0027 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [5.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u2060\u0308\u0061\u0027",
+                     new String[] { "\u0061\u2060\u0308\u0061" });
+
+    // ÷ 0061 × 2060 × 0061 ÷ 0027 × 2060 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [5.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u2060\u0061\u0027\u2060",
+                     new String[] { "\u0061\u2060\u0061" });
+
+    // ÷ 0061 × 2060 × 0308 × 0061 ÷ 0027 × 2060 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [5.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u2060\u0308\u0061\u0027\u2060",
+                     new String[] { "\u0061\u2060\u0308\u0061" });
+
+    // ÷ 0061 × 2060 × 0061 ÷ 002C ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [5.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u2060\u0061\u002C",
+                     new String[] { "\u0061\u2060\u0061" });
+
+    // ÷ 0061 × 2060 × 0308 × 0061 ÷ 002C ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [5.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u2060\u0308\u0061\u002C",
+                     new String[] { "\u0061\u2060\u0308\u0061" });
+
+    // ÷ 0061 × 2060 × 0031 ÷ 003A ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [9.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u2060\u0031\u003A",
+                     new String[] { "\u0061\u2060\u0031" });
+
+    // ÷ 0061 × 2060 × 0308 × 0031 ÷ 003A ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [9.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u2060\u0308\u0031\u003A",
+                     new String[] { "\u0061\u2060\u0308\u0031" });
+
+    // ÷ 0061 × 2060 × 0031 ÷ 0027 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [9.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u2060\u0031\u0027",
+                     new String[] { "\u0061\u2060\u0031" });
+
+    // ÷ 0061 × 2060 × 0308 × 0031 ÷ 0027 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [9.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u2060\u0308\u0031\u0027",
+                     new String[] { "\u0061\u2060\u0308\u0031" });
+
+    // ÷ 0061 × 2060 × 0031 ÷ 002C ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [9.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u2060\u0031\u002C",
+                     new String[] { "\u0061\u2060\u0031" });
+
+    // ÷ 0061 × 2060 × 0308 × 0031 ÷ 002C ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [9.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u2060\u0308\u0031\u002C",
+                     new String[] { "\u0061\u2060\u0308\u0031" });
+
+    // ÷ 0061 × 2060 × 0031 ÷ 002E × 2060 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [9.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u2060\u0031\u002E\u2060",
+                     new String[] { "\u0061\u2060\u0031" });
+
+    // ÷ 0061 × 2060 × 0308 × 0031 ÷ 002E × 2060 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [9.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u2060\u0308\u0031\u002E\u2060",
+                     new String[] { "\u0061\u2060\u0308\u0031" });
+
+    // ÷ 0061 ÷ 003A ÷ 0001 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [999.0] <START OF HEADING> (Other) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u003A\u0001",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 003A × 0308 ÷ 0001 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] <START OF HEADING> (Other) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u003A\u0308\u0001",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 003A ÷ 000D ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [3.11] <CARRIAGE RETURN (CR)> (CR) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u003A\r",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 003A × 0308 ÷ 000D ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.11] <CARRIAGE RETURN (CR)> (CR) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u003A\u0308\r",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 003A ÷ 000A ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [3.11] <LINE FEED (LF)> (LF) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u003A\n",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 003A × 0308 ÷ 000A ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.11] <LINE FEED (LF)> (LF) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u003A\u0308\n",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 003A ÷ 000B ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [3.11] <LINE TABULATION> (Newline) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u003A\u000B",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 003A × 0308 ÷ 000B ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.11] <LINE TABULATION> (Newline) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u003A\u0308\u000B",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 003A ÷ 3031 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u003A\u3031",
+                     new String[] { "\u0061", "\u3031" });
+
+    // ÷ 0061 ÷ 003A × 0308 ÷ 3031 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u003A\u0308\u3031",
+                     new String[] { "\u0061", "\u3031" });
+
+    // ÷ 0061 × 003A × 0041 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] COLON (MidLetter) × [7.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u003A\u0041",
+                     new String[] { "\u0061\u003A\u0041" });
+
+    // ÷ 0061 × 003A × 0308 × 0041 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [7.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u003A\u0308\u0041",
+                     new String[] { "\u0061\u003A\u0308\u0041" });
+
+    // ÷ 0061 ÷ 003A ÷ 003A ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u003A\u003A",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 003A × 0308 ÷ 003A ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u003A\u0308\u003A",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 003A ÷ 002C ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u003A\u002C",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 003A × 0308 ÷ 002C ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u003A\u0308\u002C",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 003A ÷ 0027 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u003A\u0027",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 003A × 0308 ÷ 0027 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u003A\u0308\u0027",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 003A ÷ 0030 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u003A\u0030",
+                     new String[] { "\u0061", "\u0030" });
+
+    // ÷ 0061 ÷ 003A × 0308 ÷ 0030 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u003A\u0308\u0030",
+                     new String[] { "\u0061", "\u0030" });
+
+    // ÷ 0061 ÷ 003A ÷ 005F ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u003A\u005F",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 003A × 0308 ÷ 005F ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u003A\u0308\u005F",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 003A × 00AD ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u003A\u00AD",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 003A × 0308 × 00AD ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u003A\u0308\u00AD",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 003A × 0300 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u003A\u0300",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 003A × 0308 × 0300 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u003A\u0308\u0300",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 × 003A × 0061 × 2060 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] COLON (MidLetter) × [7.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u003A\u0061\u2060",
+                     new String[] { "\u0061\u003A\u0061\u2060" });
+
+    // ÷ 0061 × 003A × 0308 × 0061 × 2060 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [7.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u003A\u0308\u0061\u2060",
+                     new String[] { "\u0061\u003A\u0308\u0061\u2060" });
+
+    // ÷ 0061 × 003A × 0061 ÷ 003A ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] COLON (MidLetter) × [7.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u003A\u0061\u003A",
+                     new String[] { "\u0061\u003A\u0061" });
+
+    // ÷ 0061 × 003A × 0308 × 0061 ÷ 003A ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [7.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u003A\u0308\u0061\u003A",
+                     new String[] { "\u0061\u003A\u0308\u0061" });
+
+    // ÷ 0061 × 003A × 0061 ÷ 0027 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] COLON (MidLetter) × [7.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u003A\u0061\u0027",
+                     new String[] { "\u0061\u003A\u0061" });
+
+    // ÷ 0061 × 003A × 0308 × 0061 ÷ 0027 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [7.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u003A\u0308\u0061\u0027",
+                     new String[] { "\u0061\u003A\u0308\u0061" });
+
+    // ÷ 0061 × 003A × 0061 ÷ 0027 × 2060 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] COLON (MidLetter) × [7.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u003A\u0061\u0027\u2060",
+                     new String[] { "\u0061\u003A\u0061" });
+
+    // ÷ 0061 × 003A × 0308 × 0061 ÷ 0027 × 2060 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [7.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u003A\u0308\u0061\u0027\u2060",
+                     new String[] { "\u0061\u003A\u0308\u0061" });
+
+    // ÷ 0061 × 003A × 0061 ÷ 002C ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] COLON (MidLetter) × [7.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u003A\u0061\u002C",
+                     new String[] { "\u0061\u003A\u0061" });
+
+    // ÷ 0061 × 003A × 0308 × 0061 ÷ 002C ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [7.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u003A\u0308\u0061\u002C",
+                     new String[] { "\u0061\u003A\u0308\u0061" });
+
+    // ÷ 0061 ÷ 003A ÷ 0031 ÷ 003A ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u003A\u0031\u003A",
+                     new String[] { "\u0061", "\u0031" });
+
+    // ÷ 0061 ÷ 003A × 0308 ÷ 0031 ÷ 003A ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u003A\u0308\u0031\u003A",
+                     new String[] { "\u0061", "\u0031" });
+
+    // ÷ 0061 ÷ 003A ÷ 0031 ÷ 0027 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u003A\u0031\u0027",
+                     new String[] { "\u0061", "\u0031" });
+
+    // ÷ 0061 ÷ 003A × 0308 ÷ 0031 ÷ 0027 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u003A\u0308\u0031\u0027",
+                     new String[] { "\u0061", "\u0031" });
+
+    // ÷ 0061 ÷ 003A ÷ 0031 ÷ 002C ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u003A\u0031\u002C",
+                     new String[] { "\u0061", "\u0031" });
+
+    // ÷ 0061 ÷ 003A × 0308 ÷ 0031 ÷ 002C ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u003A\u0308\u0031\u002C",
+                     new String[] { "\u0061", "\u0031" });
+
+    // ÷ 0061 ÷ 003A ÷ 0031 ÷ 002E × 2060 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u003A\u0031\u002E\u2060",
+                     new String[] { "\u0061", "\u0031" });
+
+    // ÷ 0061 ÷ 003A × 0308 ÷ 0031 ÷ 002E × 2060 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u003A\u0308\u0031\u002E\u2060",
+                     new String[] { "\u0061", "\u0031" });
+
+    // ÷ 0061 ÷ 0027 ÷ 0001 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [999.0] <START OF HEADING> (Other) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u0001",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 0027 × 0308 ÷ 0001 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] <START OF HEADING> (Other) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u0308\u0001",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 0027 ÷ 000D ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [3.11] <CARRIAGE RETURN (CR)> (CR) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\r",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 0027 × 0308 ÷ 000D ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.11] <CARRIAGE RETURN (CR)> (CR) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u0308\r",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 0027 ÷ 000A ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [3.11] <LINE FEED (LF)> (LF) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\n",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 0027 × 0308 ÷ 000A ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.11] <LINE FEED (LF)> (LF) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u0308\n",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 0027 ÷ 000B ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [3.11] <LINE TABULATION> (Newline) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u000B",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 0027 × 0308 ÷ 000B ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.11] <LINE TABULATION> (Newline) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u0308\u000B",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 0027 ÷ 3031 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u3031",
+                     new String[] { "\u0061", "\u3031" });
+
+    // ÷ 0061 ÷ 0027 × 0308 ÷ 3031 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u0308\u3031",
+                     new String[] { "\u0061", "\u3031" });
+
+    // ÷ 0061 × 0027 × 0041 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] APOSTROPHE (MidNumLet) × [7.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u0041",
+                     new String[] { "\u0061\u0027\u0041" });
+
+    // ÷ 0061 × 0027 × 0308 × 0041 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [7.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u0308\u0041",
+                     new String[] { "\u0061\u0027\u0308\u0041" });
+
+    // ÷ 0061 ÷ 0027 ÷ 003A ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u003A",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 0027 × 0308 ÷ 003A ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u0308\u003A",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 0027 ÷ 002C ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u002C",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 0027 × 0308 ÷ 002C ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u0308\u002C",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 0027 ÷ 0027 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u0027",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 0027 × 0308 ÷ 0027 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u0308\u0027",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 0027 ÷ 0030 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u0030",
+                     new String[] { "\u0061", "\u0030" });
+
+    // ÷ 0061 ÷ 0027 × 0308 ÷ 0030 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u0308\u0030",
+                     new String[] { "\u0061", "\u0030" });
+
+    // ÷ 0061 ÷ 0027 ÷ 005F ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u005F",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 0027 × 0308 ÷ 005F ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u0308\u005F",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 0027 × 00AD ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u00AD",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 0027 × 0308 × 00AD ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u0308\u00AD",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 0027 × 0300 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u0300",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 0027 × 0308 × 0300 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u0308\u0300",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 × 0027 × 0061 × 2060 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] APOSTROPHE (MidNumLet) × [7.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u0061\u2060",
+                     new String[] { "\u0061\u0027\u0061\u2060" });
+
+    // ÷ 0061 × 0027 × 0308 × 0061 × 2060 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [7.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u0308\u0061\u2060",
+                     new String[] { "\u0061\u0027\u0308\u0061\u2060" });
+
+    // ÷ 0061 × 0027 × 0061 ÷ 003A ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] APOSTROPHE (MidNumLet) × [7.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u0061\u003A",
+                     new String[] { "\u0061\u0027\u0061" });
+
+    // ÷ 0061 × 0027 × 0308 × 0061 ÷ 003A ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [7.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u0308\u0061\u003A",
+                     new String[] { "\u0061\u0027\u0308\u0061" });
+
+    // ÷ 0061 × 0027 × 0061 ÷ 0027 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] APOSTROPHE (MidNumLet) × [7.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u0061\u0027",
+                     new String[] { "\u0061\u0027\u0061" });
+
+    // ÷ 0061 × 0027 × 0308 × 0061 ÷ 0027 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [7.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u0308\u0061\u0027",
+                     new String[] { "\u0061\u0027\u0308\u0061" });
+
+    // ÷ 0061 × 0027 × 0061 ÷ 0027 × 2060 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] APOSTROPHE (MidNumLet) × [7.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u0061\u0027\u2060",
+                     new String[] { "\u0061\u0027\u0061" });
+
+    // ÷ 0061 × 0027 × 0308 × 0061 ÷ 0027 × 2060 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [7.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u0308\u0061\u0027\u2060",
+                     new String[] { "\u0061\u0027\u0308\u0061" });
+
+    // ÷ 0061 × 0027 × 0061 ÷ 002C ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] APOSTROPHE (MidNumLet) × [7.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u0061\u002C",
+                     new String[] { "\u0061\u0027\u0061" });
+
+    // ÷ 0061 × 0027 × 0308 × 0061 ÷ 002C ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [7.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u0308\u0061\u002C",
+                     new String[] { "\u0061\u0027\u0308\u0061" });
+
+    // ÷ 0061 ÷ 0027 ÷ 0031 ÷ 003A ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u0031\u003A",
+                     new String[] { "\u0061", "\u0031" });
+
+    // ÷ 0061 ÷ 0027 × 0308 ÷ 0031 ÷ 003A ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u0308\u0031\u003A",
+                     new String[] { "\u0061", "\u0031" });
+
+    // ÷ 0061 ÷ 0027 ÷ 0031 ÷ 0027 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u0031\u0027",
+                     new String[] { "\u0061", "\u0031" });
+
+    // ÷ 0061 ÷ 0027 × 0308 ÷ 0031 ÷ 0027 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u0308\u0031\u0027",
+                     new String[] { "\u0061", "\u0031" });
+
+    // ÷ 0061 ÷ 0027 ÷ 0031 ÷ 002C ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u0031\u002C",
+                     new String[] { "\u0061", "\u0031" });
+
+    // ÷ 0061 ÷ 0027 × 0308 ÷ 0031 ÷ 002C ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u0308\u0031\u002C",
+                     new String[] { "\u0061", "\u0031" });
+
+    // ÷ 0061 ÷ 0027 ÷ 0031 ÷ 002E × 2060 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u0031\u002E\u2060",
+                     new String[] { "\u0061", "\u0031" });
+
+    // ÷ 0061 ÷ 0027 × 0308 ÷ 0031 ÷ 002E × 2060 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u0308\u0031\u002E\u2060",
+                     new String[] { "\u0061", "\u0031" });
+
+    // ÷ 0061 ÷ 0027 × 2060 ÷ 0001 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] <START OF HEADING> (Other) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u0001",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 0027 × 2060 × 0308 ÷ 0001 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] <START OF HEADING> (Other) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u0308\u0001",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 0027 × 2060 ÷ 000D ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [3.11] <CARRIAGE RETURN (CR)> (CR) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\r",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 0027 × 2060 × 0308 ÷ 000D ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.11] <CARRIAGE RETURN (CR)> (CR) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u0308\r",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 0027 × 2060 ÷ 000A ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [3.11] <LINE FEED (LF)> (LF) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\n",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 0027 × 2060 × 0308 ÷ 000A ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.11] <LINE FEED (LF)> (LF) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u0308\n",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 0027 × 2060 ÷ 000B ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [3.11] <LINE TABULATION> (Newline) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u000B",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 0027 × 2060 × 0308 ÷ 000B ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.11] <LINE TABULATION> (Newline) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u0308\u000B",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 0027 × 2060 ÷ 3031 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u3031",
+                     new String[] { "\u0061", "\u3031" });
+
+    // ÷ 0061 ÷ 0027 × 2060 × 0308 ÷ 3031 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u0308\u3031",
+                     new String[] { "\u0061", "\u3031" });
+
+    // ÷ 0061 × 0027 × 2060 × 0041 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [7.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u0041",
+                     new String[] { "\u0061\u0027\u2060\u0041" });
+
+    // ÷ 0061 × 0027 × 2060 × 0308 × 0041 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [7.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u0308\u0041",
+                     new String[] { "\u0061\u0027\u2060\u0308\u0041" });
+
+    // ÷ 0061 ÷ 0027 × 2060 ÷ 003A ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u003A",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 0027 × 2060 × 0308 ÷ 003A ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u0308\u003A",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 0027 × 2060 ÷ 002C ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u002C",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 0027 × 2060 × 0308 ÷ 002C ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u0308\u002C",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 0027 × 2060 ÷ 0027 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u0027",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 0027 × 2060 × 0308 ÷ 0027 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u0308\u0027",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 0027 × 2060 ÷ 0030 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u0030",
+                     new String[] { "\u0061", "\u0030" });
+
+    // ÷ 0061 ÷ 0027 × 2060 × 0308 ÷ 0030 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u0308\u0030",
+                     new String[] { "\u0061", "\u0030" });
+
+    // ÷ 0061 ÷ 0027 × 2060 ÷ 005F ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u005F",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 0027 × 2060 × 0308 ÷ 005F ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u0308\u005F",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 0027 × 2060 × 00AD ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u00AD",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 0027 × 2060 × 0308 × 00AD ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u0308\u00AD",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 0027 × 2060 × 0300 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u0300",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 0027 × 2060 × 0308 × 0300 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u0308\u0300",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 × 0027 × 2060 × 0061 × 2060 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [7.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u0061\u2060",
+                     new String[] { "\u0061\u0027\u2060\u0061\u2060" });
+
+    // ÷ 0061 × 0027 × 2060 × 0308 × 0061 × 2060 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [7.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u0308\u0061\u2060",
+                     new String[] { "\u0061\u0027\u2060\u0308\u0061\u2060" });
+
+    // ÷ 0061 × 0027 × 2060 × 0061 ÷ 003A ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [7.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u0061\u003A",
+                     new String[] { "\u0061\u0027\u2060\u0061" });
+
+    // ÷ 0061 × 0027 × 2060 × 0308 × 0061 ÷ 003A ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [7.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u0308\u0061\u003A",
+                     new String[] { "\u0061\u0027\u2060\u0308\u0061" });
+
+    // ÷ 0061 × 0027 × 2060 × 0061 ÷ 0027 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [7.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u0061\u0027",
+                     new String[] { "\u0061\u0027\u2060\u0061" });
+
+    // ÷ 0061 × 0027 × 2060 × 0308 × 0061 ÷ 0027 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [7.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u0308\u0061\u0027",
+                     new String[] { "\u0061\u0027\u2060\u0308\u0061" });
+
+    // ÷ 0061 × 0027 × 2060 × 0061 ÷ 0027 × 2060 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [7.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u0061\u0027\u2060",
+                     new String[] { "\u0061\u0027\u2060\u0061" });
+
+    // ÷ 0061 × 0027 × 2060 × 0308 × 0061 ÷ 0027 × 2060 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [7.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u0308\u0061\u0027\u2060",
+                     new String[] { "\u0061\u0027\u2060\u0308\u0061" });
+
+    // ÷ 0061 × 0027 × 2060 × 0061 ÷ 002C ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [7.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u0061\u002C",
+                     new String[] { "\u0061\u0027\u2060\u0061" });
+
+    // ÷ 0061 × 0027 × 2060 × 0308 × 0061 ÷ 002C ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [7.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u0308\u0061\u002C",
+                     new String[] { "\u0061\u0027\u2060\u0308\u0061" });
+
+    // ÷ 0061 ÷ 0027 × 2060 ÷ 0031 ÷ 003A ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u0031\u003A",
+                     new String[] { "\u0061", "\u0031" });
+
+    // ÷ 0061 ÷ 0027 × 2060 × 0308 ÷ 0031 ÷ 003A ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u0308\u0031\u003A",
+                     new String[] { "\u0061", "\u0031" });
+
+    // ÷ 0061 ÷ 0027 × 2060 ÷ 0031 ÷ 0027 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u0031\u0027",
+                     new String[] { "\u0061", "\u0031" });
+
+    // ÷ 0061 ÷ 0027 × 2060 × 0308 ÷ 0031 ÷ 0027 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u0308\u0031\u0027",
+                     new String[] { "\u0061", "\u0031" });
+
+    // ÷ 0061 ÷ 0027 × 2060 ÷ 0031 ÷ 002C ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u0031\u002C",
+                     new String[] { "\u0061", "\u0031" });
+
+    // ÷ 0061 ÷ 0027 × 2060 × 0308 ÷ 0031 ÷ 002C ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u0308\u0031\u002C",
+                     new String[] { "\u0061", "\u0031" });
+
+    // ÷ 0061 ÷ 0027 × 2060 ÷ 0031 ÷ 002E × 2060 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u0031\u002E\u2060",
+                     new String[] { "\u0061", "\u0031" });
+
+    // ÷ 0061 ÷ 0027 × 2060 × 0308 ÷ 0031 ÷ 002E × 2060 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u0308\u0031\u002E\u2060",
+                     new String[] { "\u0061", "\u0031" });
+
+    // ÷ 0061 ÷ 002C ÷ 0001 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [999.0] <START OF HEADING> (Other) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u002C\u0001",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 002C × 0308 ÷ 0001 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] <START OF HEADING> (Other) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u002C\u0308\u0001",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 002C ÷ 000D ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [3.11] <CARRIAGE RETURN (CR)> (CR) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u002C\r",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 002C × 0308 ÷ 000D ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.11] <CARRIAGE RETURN (CR)> (CR) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u002C\u0308\r",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 002C ÷ 000A ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [3.11] <LINE FEED (LF)> (LF) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u002C\n",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 002C × 0308 ÷ 000A ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.11] <LINE FEED (LF)> (LF) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u002C\u0308\n",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 002C ÷ 000B ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [3.11] <LINE TABULATION> (Newline) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u002C\u000B",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 002C × 0308 ÷ 000B ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.11] <LINE TABULATION> (Newline) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u002C\u0308\u000B",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 002C ÷ 3031 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u002C\u3031",
+                     new String[] { "\u0061", "\u3031" });
+
+    // ÷ 0061 ÷ 002C × 0308 ÷ 3031 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u002C\u0308\u3031",
+                     new String[] { "\u0061", "\u3031" });
+
+    // ÷ 0061 ÷ 002C ÷ 0041 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u002C\u0041",
+                     new String[] { "\u0061", "\u0041" });
+
+    // ÷ 0061 ÷ 002C × 0308 ÷ 0041 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u002C\u0308\u0041",
+                     new String[] { "\u0061", "\u0041" });
+
+    // ÷ 0061 ÷ 002C ÷ 003A ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u002C\u003A",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 002C × 0308 ÷ 003A ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u002C\u0308\u003A",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 002C ÷ 002C ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u002C\u002C",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 002C × 0308 ÷ 002C ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u002C\u0308\u002C",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 002C ÷ 0027 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u002C\u0027",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 002C × 0308 ÷ 0027 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u002C\u0308\u0027",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 002C ÷ 0030 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u002C\u0030",
+                     new String[] { "\u0061", "\u0030" });
+
+    // ÷ 0061 ÷ 002C × 0308 ÷ 0030 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u002C\u0308\u0030",
+                     new String[] { "\u0061", "\u0030" });
+
+    // ÷ 0061 ÷ 002C ÷ 005F ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u002C\u005F",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 002C × 0308 ÷ 005F ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u002C\u0308\u005F",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 002C × 00AD ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u002C\u00AD",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 002C × 0308 × 00AD ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u002C\u0308\u00AD",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 002C × 0300 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u002C\u0300",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 002C × 0308 × 0300 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u002C\u0308\u0300",
+                     new String[] { "\u0061" });
+
+    // ÷ 0061 ÷ 002C ÷ 0061 × 2060 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u002C\u0061\u2060",
+                     new String[] { "\u0061", "\u0061\u2060" });
+
+    // ÷ 0061 ÷ 002C × 0308 ÷ 0061 × 2060 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u002C\u0308\u0061\u2060",
+                     new String[] { "\u0061", "\u0061\u2060" });
+
+    // ÷ 0061 ÷ 002C ÷ 0061 ÷ 003A ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u002C\u0061\u003A",
+                     new String[] { "\u0061", "\u0061" });
+
+    // ÷ 0061 ÷ 002C × 0308 ÷ 0061 ÷ 003A ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u002C\u0308\u0061\u003A",
+                     new String[] { "\u0061", "\u0061" });
+
+    // ÷ 0061 ÷ 002C ÷ 0061 ÷ 0027 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u002C\u0061\u0027",
+                     new String[] { "\u0061", "\u0061" });
+
+    // ÷ 0061 ÷ 002C × 0308 ÷ 0061 ÷ 0027 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u002C\u0308\u0061\u0027",
+                     new String[] { "\u0061", "\u0061" });
+
+    // ÷ 0061 ÷ 002C ÷ 0061 ÷ 0027 × 2060 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u002C\u0061\u0027\u2060",
+                     new String[] { "\u0061", "\u0061" });
+
+    // ÷ 0061 ÷ 002C × 0308 ÷ 0061 ÷ 0027 × 2060 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u002C\u0308\u0061\u0027\u2060",
+                     new String[] { "\u0061", "\u0061" });
+
+    // ÷ 0061 ÷ 002C ÷ 0061 ÷ 002C ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u002C\u0061\u002C",
+                     new String[] { "\u0061", "\u0061" });
+
+    // ÷ 0061 ÷ 002C × 0308 ÷ 0061 ÷ 002C ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u002C\u0308\u0061\u002C",
+                     new String[] { "\u0061", "\u0061" });
+
+    // ÷ 0061 ÷ 002C ÷ 0031 ÷ 003A ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u002C\u0031\u003A",
+                     new String[] { "\u0061", "\u0031" });
+
+    // ÷ 0061 ÷ 002C × 0308 ÷ 0031 ÷ 003A ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u002C\u0308\u0031\u003A",
+                     new String[] { "\u0061", "\u0031" });
+
+    // ÷ 0061 ÷ 002C ÷ 0031 ÷ 0027 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u002C\u0031\u0027",
+                     new String[] { "\u0061", "\u0031" });
+
+    // ÷ 0061 ÷ 002C × 0308 ÷ 0031 ÷ 0027 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u002C\u0308\u0031\u0027",
+                     new String[] { "\u0061", "\u0031" });
+
+    // ÷ 0061 ÷ 002C ÷ 0031 ÷ 002C ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u002C\u0031\u002C",
+                     new String[] { "\u0061", "\u0031" });
+
+    // ÷ 0061 ÷ 002C × 0308 ÷ 0031 ÷ 002C ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u002C\u0308\u0031\u002C",
+                     new String[] { "\u0061", "\u0031" });
+
+    // ÷ 0061 ÷ 002C ÷ 0031 ÷ 002E × 2060 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u002C\u0031\u002E\u2060",
+                     new String[] { "\u0061", "\u0031" });
+
+    // ÷ 0061 ÷ 002C × 0308 ÷ 0031 ÷ 002E × 2060 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u002C\u0308\u0031\u002E\u2060",
+                     new String[] { "\u0061", "\u0031" });
+
+    // ÷ 0031 ÷ 003A ÷ 0001 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [999.0] <START OF HEADING> (Other) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u003A\u0001",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 003A × 0308 ÷ 0001 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] <START OF HEADING> (Other) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u003A\u0308\u0001",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 003A ÷ 000D ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [3.11] <CARRIAGE RETURN (CR)> (CR) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u003A\r",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 003A × 0308 ÷ 000D ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.11] <CARRIAGE RETURN (CR)> (CR) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u003A\u0308\r",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 003A ÷ 000A ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [3.11] <LINE FEED (LF)> (LF) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u003A\n",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 003A × 0308 ÷ 000A ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.11] <LINE FEED (LF)> (LF) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u003A\u0308\n",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 003A ÷ 000B ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [3.11] <LINE TABULATION> (Newline) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u003A\u000B",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 003A × 0308 ÷ 000B ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.11] <LINE TABULATION> (Newline) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u003A\u0308\u000B",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 003A ÷ 3031 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u003A\u3031",
+                     new String[] { "\u0031", "\u3031" });
+
+    // ÷ 0031 ÷ 003A × 0308 ÷ 3031 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u003A\u0308\u3031",
+                     new String[] { "\u0031", "\u3031" });
+
+    // ÷ 0031 ÷ 003A ÷ 0041 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u003A\u0041",
+                     new String[] { "\u0031", "\u0041" });
+
+    // ÷ 0031 ÷ 003A × 0308 ÷ 0041 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u003A\u0308\u0041",
+                     new String[] { "\u0031", "\u0041" });
+
+    // ÷ 0031 ÷ 003A ÷ 003A ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u003A\u003A",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 003A × 0308 ÷ 003A ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u003A\u0308\u003A",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 003A ÷ 002C ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u003A\u002C",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 003A × 0308 ÷ 002C ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u003A\u0308\u002C",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 003A ÷ 0027 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u003A\u0027",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 003A × 0308 ÷ 0027 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u003A\u0308\u0027",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 003A ÷ 0030 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u003A\u0030",
+                     new String[] { "\u0031", "\u0030" });
+
+    // ÷ 0031 ÷ 003A × 0308 ÷ 0030 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u003A\u0308\u0030",
+                     new String[] { "\u0031", "\u0030" });
+
+    // ÷ 0031 ÷ 003A ÷ 005F ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u003A\u005F",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 003A × 0308 ÷ 005F ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u003A\u0308\u005F",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 003A × 00AD ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u003A\u00AD",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 003A × 0308 × 00AD ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u003A\u0308\u00AD",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 003A × 0300 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u003A\u0300",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 003A × 0308 × 0300 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u003A\u0308\u0300",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 003A ÷ 0061 × 2060 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u003A\u0061\u2060",
+                     new String[] { "\u0031", "\u0061\u2060" });
+
+    // ÷ 0031 ÷ 003A × 0308 ÷ 0061 × 2060 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u003A\u0308\u0061\u2060",
+                     new String[] { "\u0031", "\u0061\u2060" });
+
+    // ÷ 0031 ÷ 003A ÷ 0061 ÷ 003A ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u003A\u0061\u003A",
+                     new String[] { "\u0031", "\u0061" });
+
+    // ÷ 0031 ÷ 003A × 0308 ÷ 0061 ÷ 003A ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u003A\u0308\u0061\u003A",
+                     new String[] { "\u0031", "\u0061" });
+
+    // ÷ 0031 ÷ 003A ÷ 0061 ÷ 0027 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u003A\u0061\u0027",
+                     new String[] { "\u0031", "\u0061" });
+
+    // ÷ 0031 ÷ 003A × 0308 ÷ 0061 ÷ 0027 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u003A\u0308\u0061\u0027",
+                     new String[] { "\u0031", "\u0061" });
+
+    // ÷ 0031 ÷ 003A ÷ 0061 ÷ 0027 × 2060 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u003A\u0061\u0027\u2060",
+                     new String[] { "\u0031", "\u0061" });
+
+    // ÷ 0031 ÷ 003A × 0308 ÷ 0061 ÷ 0027 × 2060 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u003A\u0308\u0061\u0027\u2060",
+                     new String[] { "\u0031", "\u0061" });
+
+    // ÷ 0031 ÷ 003A ÷ 0061 ÷ 002C ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u003A\u0061\u002C",
+                     new String[] { "\u0031", "\u0061" });
+
+    // ÷ 0031 ÷ 003A × 0308 ÷ 0061 ÷ 002C ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u003A\u0308\u0061\u002C",
+                     new String[] { "\u0031", "\u0061" });
+
+    // ÷ 0031 ÷ 003A ÷ 0031 ÷ 003A ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u003A\u0031\u003A",
+                     new String[] { "\u0031", "\u0031" });
+
+    // ÷ 0031 ÷ 003A × 0308 ÷ 0031 ÷ 003A ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u003A\u0308\u0031\u003A",
+                     new String[] { "\u0031", "\u0031" });
+
+    // ÷ 0031 ÷ 003A ÷ 0031 ÷ 0027 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u003A\u0031\u0027",
+                     new String[] { "\u0031", "\u0031" });
+
+    // ÷ 0031 ÷ 003A × 0308 ÷ 0031 ÷ 0027 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u003A\u0308\u0031\u0027",
+                     new String[] { "\u0031", "\u0031" });
+
+    // ÷ 0031 ÷ 003A ÷ 0031 ÷ 002C ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u003A\u0031\u002C",
+                     new String[] { "\u0031", "\u0031" });
+
+    // ÷ 0031 ÷ 003A × 0308 ÷ 0031 ÷ 002C ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u003A\u0308\u0031\u002C",
+                     new String[] { "\u0031", "\u0031" });
+
+    // ÷ 0031 ÷ 003A ÷ 0031 ÷ 002E × 2060 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u003A\u0031\u002E\u2060",
+                     new String[] { "\u0031", "\u0031" });
+
+    // ÷ 0031 ÷ 003A × 0308 ÷ 0031 ÷ 002E × 2060 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u003A\u0308\u0031\u002E\u2060",
+                     new String[] { "\u0031", "\u0031" });
+
+    // ÷ 0031 ÷ 0027 ÷ 0001 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [999.0] <START OF HEADING> (Other) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u0027\u0001",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 0027 × 0308 ÷ 0001 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] <START OF HEADING> (Other) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u0027\u0308\u0001",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 0027 ÷ 000D ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [3.11] <CARRIAGE RETURN (CR)> (CR) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u0027\r",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 0027 × 0308 ÷ 000D ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.11] <CARRIAGE RETURN (CR)> (CR) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u0027\u0308\r",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 0027 ÷ 000A ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [3.11] <LINE FEED (LF)> (LF) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u0027\n",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 0027 × 0308 ÷ 000A ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.11] <LINE FEED (LF)> (LF) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u0027\u0308\n",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 0027 ÷ 000B ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [3.11] <LINE TABULATION> (Newline) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u0027\u000B",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 0027 × 0308 ÷ 000B ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.11] <LINE TABULATION> (Newline) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u0027\u0308\u000B",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 0027 ÷ 3031 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u0027\u3031",
+                     new String[] { "\u0031", "\u3031" });
+
+    // ÷ 0031 ÷ 0027 × 0308 ÷ 3031 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u0027\u0308\u3031",
+                     new String[] { "\u0031", "\u3031" });
+
+    // ÷ 0031 ÷ 0027 ÷ 0041 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u0027\u0041",
+                     new String[] { "\u0031", "\u0041" });
+
+    // ÷ 0031 ÷ 0027 × 0308 ÷ 0041 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u0027\u0308\u0041",
+                     new String[] { "\u0031", "\u0041" });
+
+    // ÷ 0031 ÷ 0027 ÷ 003A ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u0027\u003A",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 0027 × 0308 ÷ 003A ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u0027\u0308\u003A",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 0027 ÷ 002C ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u0027\u002C",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 0027 × 0308 ÷ 002C ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u0027\u0308\u002C",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 0027 ÷ 0027 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u0027\u0027",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 0027 × 0308 ÷ 0027 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u0027\u0308\u0027",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 × 0027 × 0030 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) × [12.0] APOSTROPHE (MidNumLet) × [11.0] DIGIT ZERO (Numeric) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u0027\u0030",
+                     new String[] { "\u0031\u0027\u0030" });
+
+    // ÷ 0031 × 0027 × 0308 × 0030 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) × [12.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [11.0] DIGIT ZERO (Numeric) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u0027\u0308\u0030",
+                     new String[] { "\u0031\u0027\u0308\u0030" });
+
+    // ÷ 0031 ÷ 0027 ÷ 005F ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u0027\u005F",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 0027 × 0308 ÷ 005F ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u0027\u0308\u005F",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 0027 × 00AD ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u0027\u00AD",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 0027 × 0308 × 00AD ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u0027\u0308\u00AD",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 0027 × 0300 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u0027\u0300",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 0027 × 0308 × 0300 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u0027\u0308\u0300",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 0027 ÷ 0061 × 2060 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u0027\u0061\u2060",
+                     new String[] { "\u0031", "\u0061\u2060" });
+
+    // ÷ 0031 ÷ 0027 × 0308 ÷ 0061 × 2060 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u0027\u0308\u0061\u2060",
+                     new String[] { "\u0031", "\u0061\u2060" });
+
+    // ÷ 0031 ÷ 0027 ÷ 0061 ÷ 003A ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u0027\u0061\u003A",
+                     new String[] { "\u0031", "\u0061" });
+
+    // ÷ 0031 ÷ 0027 × 0308 ÷ 0061 ÷ 003A ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u0027\u0308\u0061\u003A",
+                     new String[] { "\u0031", "\u0061" });
+
+    // ÷ 0031 ÷ 0027 ÷ 0061 ÷ 0027 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u0027\u0061\u0027",
+                     new String[] { "\u0031", "\u0061" });
+
+    // ÷ 0031 ÷ 0027 × 0308 ÷ 0061 ÷ 0027 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u0027\u0308\u0061\u0027",
+                     new String[] { "\u0031", "\u0061" });
+
+    // ÷ 0031 ÷ 0027 ÷ 0061 ÷ 0027 × 2060 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u0027\u0061\u0027\u2060",
+                     new String[] { "\u0031", "\u0061" });
+
+    // ÷ 0031 ÷ 0027 × 0308 ÷ 0061 ÷ 0027 × 2060 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u0027\u0308\u0061\u0027\u2060",
+                     new String[] { "\u0031", "\u0061" });
+
+    // ÷ 0031 ÷ 0027 ÷ 0061 ÷ 002C ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u0027\u0061\u002C",
+                     new String[] { "\u0031", "\u0061" });
+
+    // ÷ 0031 ÷ 0027 × 0308 ÷ 0061 ÷ 002C ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u0027\u0308\u0061\u002C",
+                     new String[] { "\u0031", "\u0061" });
+
+    // ÷ 0031 × 0027 × 0031 ÷ 003A ÷	#  ÷ [0.2] DIGIT ONE (Numeric) × [12.0] APOSTROPHE (MidNumLet) × [11.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u0027\u0031\u003A",
+                     new String[] { "\u0031\u0027\u0031" });
+
+    // ÷ 0031 × 0027 × 0308 × 0031 ÷ 003A ÷	#  ÷ [0.2] DIGIT ONE (Numeric) × [12.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [11.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u0027\u0308\u0031\u003A",
+                     new String[] { "\u0031\u0027\u0308\u0031" });
+
+    // ÷ 0031 × 0027 × 0031 ÷ 0027 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) × [12.0] APOSTROPHE (MidNumLet) × [11.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u0027\u0031\u0027",
+                     new String[] { "\u0031\u0027\u0031" });
+
+    // ÷ 0031 × 0027 × 0308 × 0031 ÷ 0027 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) × [12.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [11.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u0027\u0308\u0031\u0027",
+                     new String[] { "\u0031\u0027\u0308\u0031" });
+
+    // ÷ 0031 × 0027 × 0031 ÷ 002C ÷	#  ÷ [0.2] DIGIT ONE (Numeric) × [12.0] APOSTROPHE (MidNumLet) × [11.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u0027\u0031\u002C",
+                     new String[] { "\u0031\u0027\u0031" });
+
+    // ÷ 0031 × 0027 × 0308 × 0031 ÷ 002C ÷	#  ÷ [0.2] DIGIT ONE (Numeric) × [12.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [11.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u0027\u0308\u0031\u002C",
+                     new String[] { "\u0031\u0027\u0308\u0031" });
+
+    // ÷ 0031 × 0027 × 0031 ÷ 002E × 2060 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) × [12.0] APOSTROPHE (MidNumLet) × [11.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u0027\u0031\u002E\u2060",
+                     new String[] { "\u0031\u0027\u0031" });
+
+    // ÷ 0031 × 0027 × 0308 × 0031 ÷ 002E × 2060 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) × [12.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [11.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u0027\u0308\u0031\u002E\u2060",
+                     new String[] { "\u0031\u0027\u0308\u0031" });
+
+    // ÷ 0031 ÷ 002C ÷ 0001 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [999.0] <START OF HEADING> (Other) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002C\u0001",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 002C × 0308 ÷ 0001 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] <START OF HEADING> (Other) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002C\u0308\u0001",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 002C ÷ 000D ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [3.11] <CARRIAGE RETURN (CR)> (CR) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002C\r",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 002C × 0308 ÷ 000D ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.11] <CARRIAGE RETURN (CR)> (CR) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002C\u0308\r",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 002C ÷ 000A ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [3.11] <LINE FEED (LF)> (LF) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002C\n",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 002C × 0308 ÷ 000A ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.11] <LINE FEED (LF)> (LF) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002C\u0308\n",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 002C ÷ 000B ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [3.11] <LINE TABULATION> (Newline) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002C\u000B",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 002C × 0308 ÷ 000B ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.11] <LINE TABULATION> (Newline) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002C\u0308\u000B",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 002C ÷ 3031 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002C\u3031",
+                     new String[] { "\u0031", "\u3031" });
+
+    // ÷ 0031 ÷ 002C × 0308 ÷ 3031 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002C\u0308\u3031",
+                     new String[] { "\u0031", "\u3031" });
+
+    // ÷ 0031 ÷ 002C ÷ 0041 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002C\u0041",
+                     new String[] { "\u0031", "\u0041" });
+
+    // ÷ 0031 ÷ 002C × 0308 ÷ 0041 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002C\u0308\u0041",
+                     new String[] { "\u0031", "\u0041" });
+
+    // ÷ 0031 ÷ 002C ÷ 003A ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002C\u003A",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 002C × 0308 ÷ 003A ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002C\u0308\u003A",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 002C ÷ 002C ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002C\u002C",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 002C × 0308 ÷ 002C ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002C\u0308\u002C",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 002C ÷ 0027 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002C\u0027",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 002C × 0308 ÷ 0027 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002C\u0308\u0027",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 × 002C × 0030 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) × [12.0] COMMA (MidNum) × [11.0] DIGIT ZERO (Numeric) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002C\u0030",
+                     new String[] { "\u0031\u002C\u0030" });
+
+    // ÷ 0031 × 002C × 0308 × 0030 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) × [12.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) × [11.0] DIGIT ZERO (Numeric) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002C\u0308\u0030",
+                     new String[] { "\u0031\u002C\u0308\u0030" });
+
+    // ÷ 0031 ÷ 002C ÷ 005F ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002C\u005F",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 002C × 0308 ÷ 005F ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002C\u0308\u005F",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 002C × 00AD ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002C\u00AD",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 002C × 0308 × 00AD ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002C\u0308\u00AD",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 002C × 0300 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002C\u0300",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 002C × 0308 × 0300 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002C\u0308\u0300",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 002C ÷ 0061 × 2060 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002C\u0061\u2060",
+                     new String[] { "\u0031", "\u0061\u2060" });
+
+    // ÷ 0031 ÷ 002C × 0308 ÷ 0061 × 2060 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002C\u0308\u0061\u2060",
+                     new String[] { "\u0031", "\u0061\u2060" });
+
+    // ÷ 0031 ÷ 002C ÷ 0061 ÷ 003A ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002C\u0061\u003A",
+                     new String[] { "\u0031", "\u0061" });
+
+    // ÷ 0031 ÷ 002C × 0308 ÷ 0061 ÷ 003A ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002C\u0308\u0061\u003A",
+                     new String[] { "\u0031", "\u0061" });
+
+    // ÷ 0031 ÷ 002C ÷ 0061 ÷ 0027 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002C\u0061\u0027",
+                     new String[] { "\u0031", "\u0061" });
+
+    // ÷ 0031 ÷ 002C × 0308 ÷ 0061 ÷ 0027 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002C\u0308\u0061\u0027",
+                     new String[] { "\u0031", "\u0061" });
+
+    // ÷ 0031 ÷ 002C ÷ 0061 ÷ 0027 × 2060 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002C\u0061\u0027\u2060",
+                     new String[] { "\u0031", "\u0061" });
+
+    // ÷ 0031 ÷ 002C × 0308 ÷ 0061 ÷ 0027 × 2060 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002C\u0308\u0061\u0027\u2060",
+                     new String[] { "\u0031", "\u0061" });
+
+    // ÷ 0031 ÷ 002C ÷ 0061 ÷ 002C ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002C\u0061\u002C",
+                     new String[] { "\u0031", "\u0061" });
+
+    // ÷ 0031 ÷ 002C × 0308 ÷ 0061 ÷ 002C ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002C\u0308\u0061\u002C",
+                     new String[] { "\u0031", "\u0061" });
+
+    // ÷ 0031 × 002C × 0031 ÷ 003A ÷	#  ÷ [0.2] DIGIT ONE (Numeric) × [12.0] COMMA (MidNum) × [11.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002C\u0031\u003A",
+                     new String[] { "\u0031\u002C\u0031" });
+
+    // ÷ 0031 × 002C × 0308 × 0031 ÷ 003A ÷	#  ÷ [0.2] DIGIT ONE (Numeric) × [12.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) × [11.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002C\u0308\u0031\u003A",
+                     new String[] { "\u0031\u002C\u0308\u0031" });
+
+    // ÷ 0031 × 002C × 0031 ÷ 0027 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) × [12.0] COMMA (MidNum) × [11.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002C\u0031\u0027",
+                     new String[] { "\u0031\u002C\u0031" });
+
+    // ÷ 0031 × 002C × 0308 × 0031 ÷ 0027 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) × [12.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) × [11.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002C\u0308\u0031\u0027",
+                     new String[] { "\u0031\u002C\u0308\u0031" });
+
+    // ÷ 0031 × 002C × 0031 ÷ 002C ÷	#  ÷ [0.2] DIGIT ONE (Numeric) × [12.0] COMMA (MidNum) × [11.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002C\u0031\u002C",
+                     new String[] { "\u0031\u002C\u0031" });
+
+    // ÷ 0031 × 002C × 0308 × 0031 ÷ 002C ÷	#  ÷ [0.2] DIGIT ONE (Numeric) × [12.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) × [11.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002C\u0308\u0031\u002C",
+                     new String[] { "\u0031\u002C\u0308\u0031" });
+
+    // ÷ 0031 × 002C × 0031 ÷ 002E × 2060 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) × [12.0] COMMA (MidNum) × [11.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002C\u0031\u002E\u2060",
+                     new String[] { "\u0031\u002C\u0031" });
+
+    // ÷ 0031 × 002C × 0308 × 0031 ÷ 002E × 2060 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) × [12.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) × [11.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002C\u0308\u0031\u002E\u2060",
+                     new String[] { "\u0031\u002C\u0308\u0031" });
+
+    // ÷ 0031 ÷ 002E × 2060 ÷ 0001 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] <START OF HEADING> (Other) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u0001",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 002E × 2060 × 0308 ÷ 0001 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] <START OF HEADING> (Other) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u0308\u0001",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 002E × 2060 ÷ 000D ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [3.11] <CARRIAGE RETURN (CR)> (CR) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\r",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 002E × 2060 × 0308 ÷ 000D ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.11] <CARRIAGE RETURN (CR)> (CR) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u0308\r",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 002E × 2060 ÷ 000A ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [3.11] <LINE FEED (LF)> (LF) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\n",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 002E × 2060 × 0308 ÷ 000A ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.11] <LINE FEED (LF)> (LF) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u0308\n",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 002E × 2060 ÷ 000B ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [3.11] <LINE TABULATION> (Newline) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u000B",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 002E × 2060 × 0308 ÷ 000B ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.11] <LINE TABULATION> (Newline) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u0308\u000B",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 002E × 2060 ÷ 3031 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u3031",
+                     new String[] { "\u0031", "\u3031" });
+
+    // ÷ 0031 ÷ 002E × 2060 × 0308 ÷ 3031 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u0308\u3031",
+                     new String[] { "\u0031", "\u3031" });
+
+    // ÷ 0031 ÷ 002E × 2060 ÷ 0041 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u0041",
+                     new String[] { "\u0031", "\u0041" });
+
+    // ÷ 0031 ÷ 002E × 2060 × 0308 ÷ 0041 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u0308\u0041",
+                     new String[] { "\u0031", "\u0041" });
+
+    // ÷ 0031 ÷ 002E × 2060 ÷ 003A ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u003A",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 002E × 2060 × 0308 ÷ 003A ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u0308\u003A",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 002E × 2060 ÷ 002C ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u002C",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 002E × 2060 × 0308 ÷ 002C ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u0308\u002C",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 002E × 2060 ÷ 0027 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u0027",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 002E × 2060 × 0308 ÷ 0027 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u0308\u0027",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 × 002E × 2060 × 0030 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) × [12.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [11.0] DIGIT ZERO (Numeric) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u0030",
+                     new String[] { "\u0031\u002E\u2060\u0030" });
+
+    // ÷ 0031 × 002E × 2060 × 0308 × 0030 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) × [12.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [11.0] DIGIT ZERO (Numeric) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u0308\u0030",
+                     new String[] { "\u0031\u002E\u2060\u0308\u0030" });
+
+    // ÷ 0031 ÷ 002E × 2060 ÷ 005F ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u005F",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 002E × 2060 × 0308 ÷ 005F ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u0308\u005F",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 002E × 2060 × 00AD ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u00AD",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 002E × 2060 × 0308 × 00AD ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u0308\u00AD",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 002E × 2060 × 0300 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u0300",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 002E × 2060 × 0308 × 0300 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u0308\u0300",
+                     new String[] { "\u0031" });
+
+    // ÷ 0031 ÷ 002E × 2060 ÷ 0061 × 2060 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u0061\u2060",
+                     new String[] { "\u0031", "\u0061\u2060" });
+
+    // ÷ 0031 ÷ 002E × 2060 × 0308 ÷ 0061 × 2060 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u0308\u0061\u2060",
+                     new String[] { "\u0031", "\u0061\u2060" });
+
+    // ÷ 0031 ÷ 002E × 2060 ÷ 0061 ÷ 003A ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u0061\u003A",
+                     new String[] { "\u0031", "\u0061" });
+
+    // ÷ 0031 ÷ 002E × 2060 × 0308 ÷ 0061 ÷ 003A ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u0308\u0061\u003A",
+                     new String[] { "\u0031", "\u0061" });
+
+    // ÷ 0031 ÷ 002E × 2060 ÷ 0061 ÷ 0027 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u0061\u0027",
+                     new String[] { "\u0031", "\u0061" });
+
+    // ÷ 0031 ÷ 002E × 2060 × 0308 ÷ 0061 ÷ 0027 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u0308\u0061\u0027",
+                     new String[] { "\u0031", "\u0061" });
+
+    // ÷ 0031 ÷ 002E × 2060 ÷ 0061 ÷ 0027 × 2060 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u0061\u0027\u2060",
+                     new String[] { "\u0031", "\u0061" });
+
+    // ÷ 0031 ÷ 002E × 2060 × 0308 ÷ 0061 ÷ 0027 × 2060 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u0308\u0061\u0027\u2060",
+                     new String[] { "\u0031", "\u0061" });
+
+    // ÷ 0031 ÷ 002E × 2060 ÷ 0061 ÷ 002C ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u0061\u002C",
+                     new String[] { "\u0031", "\u0061" });
+
+    // ÷ 0031 ÷ 002E × 2060 × 0308 ÷ 0061 ÷ 002C ÷	#  ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u0308\u0061\u002C",
+                     new String[] { "\u0031", "\u0061" });
+
+    // ÷ 0031 × 002E × 2060 × 0031 ÷ 003A ÷	#  ÷ [0.2] DIGIT ONE (Numeric) × [12.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [11.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u0031\u003A",
+                     new String[] { "\u0031\u002E\u2060\u0031" });
+
+    // ÷ 0031 × 002E × 2060 × 0308 × 0031 ÷ 003A ÷	#  ÷ [0.2] DIGIT ONE (Numeric) × [12.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [11.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u0308\u0031\u003A",
+                     new String[] { "\u0031\u002E\u2060\u0308\u0031" });
+
+    // ÷ 0031 × 002E × 2060 × 0031 ÷ 0027 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) × [12.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [11.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u0031\u0027",
+                     new String[] { "\u0031\u002E\u2060\u0031" });
+
+    // ÷ 0031 × 002E × 2060 × 0308 × 0031 ÷ 0027 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) × [12.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [11.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u0308\u0031\u0027",
+                     new String[] { "\u0031\u002E\u2060\u0308\u0031" });
+
+    // ÷ 0031 × 002E × 2060 × 0031 ÷ 002C ÷	#  ÷ [0.2] DIGIT ONE (Numeric) × [12.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [11.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u0031\u002C",
+                     new String[] { "\u0031\u002E\u2060\u0031" });
+
+    // ÷ 0031 × 002E × 2060 × 0308 × 0031 ÷ 002C ÷	#  ÷ [0.2] DIGIT ONE (Numeric) × [12.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [11.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u0308\u0031\u002C",
+                     new String[] { "\u0031\u002E\u2060\u0308\u0031" });
+
+    // ÷ 0031 × 002E × 2060 × 0031 ÷ 002E × 2060 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) × [12.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [11.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u0031\u002E\u2060",
+                     new String[] { "\u0031\u002E\u2060\u0031" });
+
+    // ÷ 0031 × 002E × 2060 × 0308 × 0031 ÷ 002E × 2060 ÷	#  ÷ [0.2] DIGIT ONE (Numeric) × [12.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [11.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u0308\u0031\u002E\u2060",
+                     new String[] { "\u0031\u002E\u2060\u0308\u0031" });
+
+    // ÷ 0063 × 0061 × 006E × 0027 × 0074 ÷	#  ÷ [0.2] LATIN SMALL LETTER C (ALetter) × [5.0] LATIN SMALL LETTER A (ALetter) × [5.0] LATIN SMALL LETTER N (ALetter) × [6.0] APOSTROPHE (MidNumLet) × [7.0] LATIN SMALL LETTER T (ALetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0063\u0061\u006E\u0027\u0074",
+                     new String[] { "\u0063\u0061\u006E\u0027\u0074" });
+
+    // ÷ 0063 × 0061 × 006E × 2019 × 0074 ÷	#  ÷ [0.2] LATIN SMALL LETTER C (ALetter) × [5.0] LATIN SMALL LETTER A (ALetter) × [5.0] LATIN SMALL LETTER N (ALetter) × [6.0] RIGHT SINGLE QUOTATION MARK (MidNumLet) × [7.0] LATIN SMALL LETTER T (ALetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0063\u0061\u006E\u2019\u0074",
+                     new String[] { "\u0063\u0061\u006E\u2019\u0074" });
+
+    // ÷ 0061 × 0062 × 00AD × 0062 × 0079 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [5.0] LATIN SMALL LETTER B (ALetter) × [4.0] SOFT HYPHEN (Format_FE) × [5.0] LATIN SMALL LETTER B (ALetter) × [5.0] LATIN SMALL LETTER Y (ALetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0062\u00AD\u0062\u0079",
+                     new String[] { "\u0061\u0062\u00AD\u0062\u0079" });
+
+    // ÷ 0061 ÷ 0024 ÷ 002D ÷ 0033 × 0034 × 002C × 0035 × 0036 × 0037 × 002E × 0031 × 0034 ÷ 0025 ÷ 0062 ÷	#  ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] DOLLAR SIGN (Other) ÷ [999.0] HYPHEN-MINUS (Other) ÷ [999.0] DIGIT THREE (Numeric) × [8.0] DIGIT FOUR (Numeric) × [12.0] COMMA (MidNum) × [11.0] DIGIT FIVE (Numeric) × [8.0] DIGIT SIX (Numeric) × [8.0] DIGIT SEVEN (Numeric) × [12.0] FULL STOP (MidNumLet) × [11.0] DIGIT ONE (Numeric) × [8.0] DIGIT FOUR (Numeric) ÷ [999.0] PERCENT SIGN (Other) ÷ [999.0] LATIN SMALL LETTER B (ALetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0061\u0024\u002D\u0033\u0034\u002C\u0035\u0036\u0037\u002E\u0031\u0034\u0025\u0062",
+                     new String[] { "\u0061", "\u0033\u0034\u002C\u0035\u0036\u0037\u002E\u0031\u0034", "\u0062" });
+
+    // ÷ 0033 × 0061 ÷	#  ÷ [0.2] DIGIT THREE (Numeric) × [10.0] LATIN SMALL LETTER A (ALetter) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u0033\u0061",
+                     new String[] { "\u0033\u0061" });
+
+    // ÷ 2060 ÷ 0063 × 2060 × 0061 × 2060 × 006E × 2060 × 0027 × 2060 × 0074 × 2060 × 2060 ÷	#  ÷ [0.2] WORD JOINER (Format_FE) ÷ [999.0] LATIN SMALL LETTER C (ALetter) × [4.0] WORD JOINER (Format_FE) × [5.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [5.0] LATIN SMALL LETTER N (ALetter) × [4.0] WORD JOINER (Format_FE) × [6.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [7.0] LATIN SMALL LETTER T (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u2060\u0063\u2060\u0061\u2060\u006E\u2060\u0027\u2060\u0074\u2060\u2060",
+                     new String[] { "\u0063\u2060\u0061\u2060\u006E\u2060\u0027\u2060\u0074\u2060\u2060" });
+
+    // ÷ 2060 ÷ 0063 × 2060 × 0061 × 2060 × 006E × 2060 × 2019 × 2060 × 0074 × 2060 × 2060 ÷	#  ÷ [0.2] WORD JOINER (Format_FE) ÷ [999.0] LATIN SMALL LETTER C (ALetter) × [4.0] WORD JOINER (Format_FE) × [5.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [5.0] LATIN SMALL LETTER N (ALetter) × [4.0] WORD JOINER (Format_FE) × [6.0] RIGHT SINGLE QUOTATION MARK (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [7.0] LATIN SMALL LETTER T (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u2060\u0063\u2060\u0061\u2060\u006E\u2060\u2019\u2060\u0074\u2060\u2060",
+                     new String[] { "\u0063\u2060\u0061\u2060\u006E\u2060\u2019\u2060\u0074\u2060\u2060" });
+
+    // ÷ 2060 ÷ 0061 × 2060 × 0062 × 2060 × 00AD × 2060 × 0062 × 2060 × 0079 × 2060 × 2060 ÷	#  ÷ [0.2] WORD JOINER (Format_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [5.0] LATIN SMALL LETTER B (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] SOFT HYPHEN (Format_FE) × [4.0] WORD JOINER (Format_FE) × [5.0] LATIN SMALL LETTER B (ALetter) × [4.0] WORD JOINER (Format_FE) × [5.0] LATIN SMALL LETTER Y (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u2060\u0061\u2060\u0062\u2060\u00AD\u2060\u0062\u2060\u0079\u2060\u2060",
+                     new String[] { "\u0061\u2060\u0062\u2060\u00AD\u2060\u0062\u2060\u0079\u2060\u2060" });
+
+    // ÷ 2060 ÷ 0061 × 2060 ÷ 0024 × 2060 ÷ 002D × 2060 ÷ 0033 × 2060 × 0034 × 2060 × 002C × 2060 × 0035 × 2060 × 0036 × 2060 × 0037 × 2060 × 002E × 2060 × 0031 × 2060 × 0034 × 2060 ÷ 0025 × 2060 ÷ 0062 × 2060 × 2060 ÷	#  ÷ [0.2] WORD JOINER (Format_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] DOLLAR SIGN (Other) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] HYPHEN-MINUS (Other) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] DIGIT THREE (Numeric) × [4.0] WORD JOINER (Format_FE) × [8.0] DIGIT FOUR (Numeric) × [4.0] WORD JOINER (Format_FE) × [12.0] COMMA (MidNum) × [4.0] WORD JOINER (Format_FE) × [11.0] DIGIT FIVE (Numeric) × [4.0] WORD JOINER (Format_FE) × [8.0] DIGIT SIX (Numeric) × [4.0] WORD JOINER (Format_FE) × [8.0] DIGIT SEVEN (Numeric) × [4.0] WORD JOINER (Format_FE) × [12.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [11.0] DIGIT ONE (Numeric) × [4.0] WORD JOINER (Format_FE) × [8.0] DIGIT FOUR (Numeric) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] PERCENT SIGN (Other) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] LATIN SMALL LETTER B (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u2060\u0061\u2060\u0024\u2060\u002D\u2060\u0033\u2060\u0034\u2060\u002C\u2060\u0035\u2060\u0036\u2060\u0037\u2060\u002E\u2060\u0031\u2060\u0034\u2060\u0025\u2060\u0062\u2060\u2060",
+                     new String[] { "\u0061\u2060", "\u0033\u2060\u0034\u2060\u002C\u2060\u0035\u2060\u0036\u2060\u0037\u2060\u002E\u2060\u0031\u2060\u0034\u2060", "\u0062\u2060\u2060" });
+
+    // ÷ 2060 ÷ 0033 × 2060 × 0061 × 2060 × 2060 ÷	#  ÷ [0.2] WORD JOINER (Format_FE) ÷ [999.0] DIGIT THREE (Numeric) × [4.0] WORD JOINER (Format_FE) × [10.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    assertAnalyzesTo(analyzer, "\u2060\u0033\u2060\u0061\u2060\u2060",
+                     new String[] { "\u0033\u2060\u0061\u2060\u2060" });
+
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/analysis/email.addresses.from.random.text.with.email.addresses.txt b/lucene/backwards/src/test/org/apache/lucene/analysis/email.addresses.from.random.text.with.email.addresses.txt
new file mode 100644
index 0000000..832a2aa
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/analysis/email.addresses.from.random.text.with.email.addresses.txt
@@ -0,0 +1,265 @@
+dJ8ngFi@avz13m.CC
+JCAVLRJg@3aqiq2yui.gm
+kU-l6DS@[082.015.228.189]
+37layCJS@j5NVP7NWAY.VG
+"%U@?\B"@Fl2d.md
+aH3QW@tw8uo2.eu
+Bvd#@tupjv.sn
+SBMm0Nm.oyk70.rMNdd8k.#ru3LI.gMMLBI.0dZRD4d.RVK2nY@au58t.B13albgy4u.mt
+DvdUJk@61zwkit7dkd3rcq4v.BD
+~+Kdz@3mousnl.SE
+C'ts`@Vh4zk.uoafcft-dr753x4odt04q.UY
+}0tzWYDBuy@cSRQAABB9B.7c8xawf75-cyo.PM
+lMahAA.j/5.RqUjS745.DtkcYdi@d2-4gb-l6.ae
+V85E9Hx7@vpf0bs.bz
+MGBg2@7F3MJTCCPROS8YETM0B4-C9P7WXKGFB0.RU
+rsBWOCJ@lYX0SILY4L53Z3VJPSF6.pwrawr.vdpoq.nz
+dIyLrU@9A40T2ZIG7H8R.t63.tv
+6dAsZKz@d33XR.IR
+EnqCC@2bk6da6y08.LI
+AQ9yV@Mfqq32nexufgxzl4o7q5jv3kd.lb
+lv'p@tqk.vj5s0tgl.0dlu7su3iyiaz.dqso.494.3hb76.XN--MGBAAM7A8H
+b6/zomNkV@8jwm-he.IN
+5FLuakz.hXVkuqDt@iBFP83V6MNI3N0FRWJ9302DS-0KHRV6O.1bf59kj64uj5b6e2zfn.cm
+RhIwkU@58vmet9yfddpg.3adkmhrv1px.AO
+nEBk6w2Q@Bb5ib.2pay.so
+AlW5CMAn@qos-53u.j91qq96d4en129szf7099kxv5lo6yo.gm
+QPYBDV3.Ah/h8U@x3v444pzi.1cvgokam.PW
+5Iwbiq7@p9s-2pixps9jwzyhfroxqivw8sv90r.xn--wgbh1c
+AaFU9L@3yj1xqf1.cz9.ac
+|iCmQ1@rum6w0a7wt.3QLD.ht71.cx
+EhLTUjo@rEK.sJ44H0.GR
+bHEbq3Rp@33.lKSSMY.9xaurtfle9xe.iu4810l.fj
+eFcup.cPPEW@[1ae]
+p907@bk3o.fvtmw2m2.Uutr83x2yt4.2nuin.EU
+PpW2L5.QgP2n@9rz7.a5qi.oRH1Z.8ov.UZ
+o8UgG5fewm4vr9Ai5wPS@sgh.2F-OLKLZ81DIUET.xpya0vtx.fj
+aixQH@z-y.AR
+jVTeWQfL."M#~t Q"@1e.oglq.ubk.SZ
+6e5QQuy@N7.2cuw3x2wpddf.paycp1pc.AI
+IqG6Fl@[220.112.120.54]
+lWHH4eWSn@tbxyb7.jhzqxrk.lv
+P1zO*RaAr@[111.99.108.22]
+d00gy@[4TC]
+1yNINoBU@[136.003.010.238]
+Ms8ox@[_3Tuehr]
+wtWDNo@1sjmcbbli196-765mt7m8o8hywft.7-ga6rsnum8v.np
+"x)yO"@7le5o2rcud5ngs.Qmfmq.Jfxv8.Zznv6t6il.MIL
+1hXd@f8.1kxqd3yw4j6zmb7l7.US
+"8}(\$"@mu2viak0nh4sj5ivgpy1wqie.HK
+Th7XoAs5@ggdb.BI
+5iDbhah.xdtF1x@[59.55.12.243]
+j2ovALlgm2Wcwx@5jphzt.TN
+ZlaP~E.4Yk1K0F@lF6VN.M5.Nj.PRO
+cFCvIJAw@l93H0R1W6V4RI0AY7RLRQR4KOEVQPEG-PDTF03V4D9A0.xZZK5.lu
+8Ju2AW@1n.h7.vu
+"\nkP]{"@[Vej\yo\HD]
+fKWC?@qgcb.xn--mgbaam7a8h
+L4BbaB@hv1.BIZ
+WvSmV@qpx15vzmbtxzvi-syndl1.ML
+"3|PX~Cbdq"@U3vp-7k.8c4q3sgpwt6sochundzhx.museum
+LjH9rJTu@tkm.gy
+vQgXEFb@maxmrbk-5a5s6o.6MZZ6IK.awjbtiva7.IL
+6TVbIA@r50eh-a.la
+AaASl@Bsteea.qHXE3Q5CUJ3DBG.S2hvnld.4WJWL.fk
+"CN;\-z 6M"@86.qc7s.23p.ET
+zX3=O3o@Yjov.7g660.8M88OJGTDC5.np
+QFZlK1A@4W47EIXE.KY
+1guLnQb07k@ab.ccemuif2s.lb
+Jddxj@[111.079.109.147]
+Hj06gcE@[105.233.192.168]
+u8?xicQ@[i\21I]
+CczYer}W@bezu6wtys9s.lft3z.mobi
+OmpYhIL@6GJ7P29EIE-G63RDW7GLFLFC0M1.AERO
+2RRPLqO@8lh0i.vm7xmvvo-r5nf0x.CY
+TOc!BhbKz@F-myy7.kQWSUI7S3.net
+"0\!P?".shQVdSerA@2qmqj8ul.hm
+LTLNFsgB@[191.56.104.113]
+iT0LOq.jtPW=G06~cETxl2ge@Ah0.4hn72v.tQ.LU
+VGLn@z3E2.3an2.MM
+TWmfsxn@[112.192.017.029]
+2tP07A@2twe6u0d6uw6o.sed7n.109mx.XN--KGBECHTV
+CjaPC63@['\RDrwk]
+Ayydpdoa@tdgypppmen.wf
+"gfKP9"@jo3-r0.mz
+aTMgDW4@t5gax.XN--0ZWM56D
+mcDrMO3FQ@nwc21.y5qd45lesryrp.IL
+NZqj@v50egeveepk.z290kk.Bc3.xn--jxalpdlp
+XtAhFnq@[218.214.251.103]
+x0S8uos@[109.82.126.233]
+ALB4KFavj16pODdd@i206d6s.MM
+grxIt96.46nCf@nokjogh2l4.nCMWXG.yt
+Fgbh7@2rxkk0bvkk-v3evd-sh56gvhxlh.hhjcsg36j8qt98okjbdj9z574xdpix59zf6h80r.Gyb4rrxu.ve
+uo0AX41@Fhlegm1z57j-qvf5.p8jo6zvm.sc
+sjn4cz@9ktlwkqte.bv
+b04v0Ct@[243.230.224.190]
+F!FUbQHU@uvz7cu1l.ciz4h2.93U4V.gb
+6CHec@nONUKT.nl
+zbmZiXw@yb.bxxp.3fm457.va
+"/GdiZ7f"@[221.229.46.3]
+NJde8Li@f7a.g51VICBH.cy
+6IeAft@e-3fp.Nkh7nm8.v8i47xvrv27r.pf
+TC*Qopzb@xIOB3.6egz4.m-24t5wmxtmco4iy8g91o66mjgha1vjlepyffott.E5ta.p9.CF
+"_3Sc_"@[193.165.124.143]
+W0dwHf@[25.174.65.80]
+qPkkP0@4k0vs.oaak2z.3JMTI.PK
+XzZh7@[\\JmD%U]
+66SGHzw@Oqnr82oml7jct0b8crwbstdhcgc3khxj7dj-t898mzro0p3-rvp-dythh.TN
+ot4tPF@[AY\j]
+e4seIFbl@cib.cg
+B2w025e@r2H7BW16B24DG1S5DED.bg
+atweEde@blk-3y.mgvoh6l9my.F6.FI
+uDoPcRGW@rEBD5LUT.ly
+2KQhx@Bba.u--9b5bc0.NF
+tKWc2VjVRYD@[254.190.162.128]
+wc3W16^@D3v2uxqqeclz.w1fd529m.DM
+Njg@6S8MA.HK
+"L\^4z]92"@0qp--walx.MIL
+X08sWFD@62GNK.tN4.f1YXX.ug
+eK6Bz1Bu@[rX;J&036]
+"~`o\:"@hO4UKF.oZBWV56B.cmn.DJ
+lcgUakx@[pjGd&i2]
+BqdBTnv3c@wf35nwaza.ME
+"a#Um{:\'\bX:"@in7tjo.uw8wil.gp
+ApIbER8'@[&Y]
+JTsM0c!s9CzEH@Sd.mh
+hy2AOUc@uqxzl7v0hl2nchokqit9lyscxaa0jaqya1wek5gkd.NC
+pY7bAVD4r@[,>T*R T]
+!0axBT@03-gdh1xmk3x9.GH
+vbtyQBZI@20al5g.ro6ds4.Bsg15f5.NU
+2^ZhSK-FFYOh@Z2iku.rg.Z0ca1.gs
+G1RLpOn."yfJpg["@mXEV8.mu
+yrBKNkq@a2a1.Aifn.Ta2.dj
+Wok5G@b5aqobvi5.ni
+nXz9i.=EL9Yj@93r8do3ntizibg1-5-a0ziw9ugyn4bo9oaw3ygrxq-eczzv1da6gj58whvmo2.rs
+Dp63hd@B1kbahyq.PL
+y01rn27SFq@o0HNP8.C5.i4rvj8j338zgter7er5rkwyo5g.atnc0iuj2ke.8or6ekq0x.IO
+0RiEo@08mnvbu.p661ernzjz5p7nbyix5iuj.cig5hgvcc.SO
+Dwxab5@1sx5y3-umsy72nl.74lwye5.DJ
+IvdZVE4xRk@0vw7ajl.AR
+CvQxhXJ@d5a7qnx.ke
+n7MxA4~@[4(R]
+RFGzu3hD0@wbh4.sm
+eOADW}BcNG@2568p3b4v.Xq3eksr.GP
+AsAMWriW7.zSDQSAR6@Gg2q4rtgr.GG
+cDCVlA0t@[20.116.229.216]
+c=yJU+3L5@n2x3xhksf.gvreani.MZ
+wfYnaA4@lzojy.4oii6w6sn-p9.kh
+kdeOQ5F@vD5Y.wmmv.7rswz.1zelobcp5qxxwzjn.fOEJZ.KM
+ppULqb2Z@Hv9o2ui.AO
+tOHw@[IPv6:3500:8B6C::CB5E:1.124.160.137]
+MWLVsL@7nhliy.O8mjon3rj-kb.t8d6bcpa5i.au
+BN0EY@hh9v.p9bwgs.TN
+RgiAp@d9ln.bf
+PBugBo@97gcz.DJ
+Fh#dKzbI@[+_]
+wyqU-C9hXE@wPRBUI-WS9HXE19.LV
+muC?Js@[IPv6:47FB:5786:4b5e::5675]
+yLTT2xV@wdoszw9k1ork-z-t.kq.l3SEO.Lb4jx0.NA
+6zqw.yPV4LkL@dA3XKC.eg
+S5z9i7i3s@Vzt6.fr
+L|Sit6s@9cklii1.tf
+yWYqz@mw-9k.FJ
+Knhj419mAfftf@R26hxll64.3qtdx6g.AL
+aZYHUr6@Shyn76c67.65grky.am
+ZYxn6Px@di0cqhtg.hu
+"#mLl"@w1sc0g3vm.j1o4o9g.GW
+WYJcFp@653xk-89oprk2im.iemhx9.CC
+y5AXi@[Oa #]
+nZErAGj@6sq3-p.r8KQ.aero
+OMq5sBK@udg-5zp1.Dory85.SG
+2bymd@Ojla1hvfpw8rrihrx.cy
+5OMbw0@r2d8cn75.1VR2BJ0J3A8PY.gc0mljc-h.COOP
+al6X^pQkx@pyj--2hp.lbet.TN
+NkzPW4f@2-0.aaoqccwrgi4olytac0imp6vvphsuobrr115eygh2xwkvzeuj.tl
+"4-b9|/,\e]h]2"@9-iiahsdlzv-v65j.FK
+g8Pv2hb9@[166.176.68.63]
+"IA~".Tn03w7@[\>J?]
+E6aK9TaJ@j0hydmxhkq2q.Svku4saky.MU
+rdF2Zl1@9fsic.C17pw9o0.vn
+pCKjPa88DG&x5a@4ha07ia2jk.xk7xe8.PM
+qgLb5m@nynqp.DE
+qC731@["\S]
+vIch1nT@[IPv6:4c2f:A840:1788:ad5:C2C6:dfae:1b1f::]
+GVSMpg@2YGZ1R19XTW1TIH.Re3vg30u1xq6v7cj1wf-6m14939wvgqbl.93mztd.SG
+0jq4v7PMxm@eq6teog.kO6LR3.x2p.53yltrsvgpd3.RO
+zdGLZD0P@i2JQNM8.816oja8pkk5zkvyx.KM
+Jp#hSH@74zkerax4.31kr.7c9-yuk.mp
+Kx^0oZn@oFFA-URZ13B34J.DK
+sub52@aoq7.iHF.CH
+jfVSq9oAR2D@iGU0.7bp3x.4cr.sz
+nalgU@Yfpbdcv8a5.n9kwz6kyi2u.thic-rws.af.TG
+=uC5qVT@56g530cltpekrw.pt
+QR5&kx@7qhi3bhav5ga0eva.b0sdom.bb
+8DZQ7@dtr16r89fdw59q.cf
+Q4pNw@6o-9weojl3r7.LS
+*mfOc_CN@[G\3]
+2p`tbG@c767inolrav0hg6a-ucs.y0.tw
+Rop{cgBy@Wekdh0xns2um.UK
+t*p05lV@017y.MR
+7ZxO80@Dovepwr4l.qxfzchrn1.es8ul0vavi6gqy82.K1hc7.INT
+C_Iphp@5t4rtc.id
+q+m2x@Cfw.1tm52-kr.BO
+47NIL@Hl68os0.66l9bsf2q.SC
+vi0LyF9O@p74jz6mxby.it
+xQ4jU@rQVWLWAD3T8.4-lnu.AZ
+zea_0Kr@[97.59.144.249]
+5HP1k|s@[068.150.236.123]
+5XJZlmYk.3Du5qee@[072.023.197.244]
+AvNrIHB0@[+n}oV]
+"!N7/I\zhh"@[204.037.067.146]
+vlJODxFF@xFO6V.i1.fgad6bjy.NO
+qDe0FA@xpp1le82ndircjgyrxyzkrqu3il.oUKHVV6829P-16JILWG62KN.cr
+pMF64@wssq6kh9uhxk.cA2YZVBV4JW.xX585A.ru
+G3meE@[^!'OO]
+"1@0UYJl"@vplkx.d2n.i3tcx3aaxut.lbb3v9.ldq.me
+iTH0QND@wg9sizy.lr
+9kF?opSTo9rSDWLo&W&6@xrh32ibf.F0zb6kb.BJ
+a0FI1m@1olkdpz.W70a3w8qmk3.NA
+"0H}r}X(p\M`/x"@rY48LPH.Axy.Ue624.TV
+AQL6YBFb@Hxawb15okz.y4.y5c0e.bt
+PEaNVR@m8NH9BVX5L096DRM7YTR.er
+diI`Q@i5fpkuc.7zg2av.D6tzqq.CK
+TCN0-Z@Tezeq9ejv.ekeab8hz14hui.il
+05SnFh@jZ85JXZ.1RO99W5FYK3.uyv7g15.MP
+B2Z76Rn@9yce0shfsydxetu1v4-y.rBU2M0.6ik8oapv0zho6n653il25gu4rd216uw03.MG
+vGZ2K@C2osgjtel5uerwn.riihbabhh41ve84.r3l.vH6S64.vn
+Nv2ZgL@[037.054.177.155]
+WsdI2W@i1ULFQ1.79qfph2.eg
+vJfpTf3@Hh4x2h.25m0idq3.fr
+oRqbgftr@l6jg0.TV
+NiynsKb@k9BTX4-FV.hc0skm-o.lv
+w9uGwf@4hop8.Jb9655is.nr
+"NVUW+"@6jbe.KM
+QusHU6JMR@0RXKIZNH76C3.Oqwcfr779e.MH
+}C5IwKv1S45vlmPaaVHhF@[IPv6:EBF6::]
+T7rXlYc@4AI1LM.2o.uk
+uuCiDC6c@Maar3.65hlg-wf.t3pt9.FJ
+w2mNOvIUh@dx3ep7ew.ru
+b#Add@9hpopo.Xg3tbjchdpt.TT
+NtrgJjfj."NBwi"@[142.085.096.018]
+00lF9UB@2NR2.rs
+MPr42ye9@p08lcrzs.4bzxfznsh2bhgsa.CX
+awwLoYLn~c2LfTEVT@fwksx.qoj94r11kw19k50k3.gd
+gRZ5w9epm@p6adico3auugj5qklec.Sm4bx5.li
+zfdZ67Y@1azhq.dl3xxzni2.rrj.lpclc6g4d.sl
+vTWwSD4fb@uBSOHD.3g.u3mb.gf
+cYFVxcC6E@F9g0b.n1339r.AU
+pnuXl@s1alo2.tc
+lKy64zp.Cbg8BM@y0S.6uiux8h8.0udipt.ma
+|9FDgc@vbrz.3L.av4kmt.rs
+skcHAu7@xD715N1.DZ
+BfcgHK3@[220.136.9.224]
+LCOEag@Gwm.drsa0.GL
+qrNZtp3vO@a0gr.8j9cvcgy0p-3.HN
+lfW2rei20XWSmpQoPY1Dl@[(N&c]
+WFBBEv|@q7R2J.oy48740.pm
+6H6rPx@zVJ40.xgyat.cLUX6SVFJWMLF9EZ2PL8QQEU7U1WT0JW3QR8898ALFGKO18CF1DOX89DR.1tfu30mp.CA
+ytG@J4auwv4has.PS
+"X;+N1A\A "@rc9cln0xyy8wa6axedojj9r0slj0v.Luy9i6ipqrz74lm5-n6f1-2srq5vdo-opef747ubdykv5hc.2lztpe.er
+DQTmqL4LVRUvuvoNb8=TT@2up3.PY
+NC0OPLz@kcru1s0mu.name
+kBoJf{XaGl@[248.166.223.221]
+pEjZPm8A@v956Y7GQV.5uu6.Ribgf20u.6e.0do1nki1t.ahy.6iy.sm
+pIFWkl2@w9N0Q.MC
+p=VTtlpC@w3ttqb.FO
diff --git a/lucene/backwards/src/test/org/apache/lucene/analysis/generateJavaUnicodeWordBreakTest.pl b/lucene/backwards/src/test/org/apache/lucene/analysis/generateJavaUnicodeWordBreakTest.pl
new file mode 100644
index 0000000..bff17a6
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/analysis/generateJavaUnicodeWordBreakTest.pl
@@ -0,0 +1,206 @@
+#!/usr/bin/perl
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+use warnings;
+use strict;
+use File::Spec;
+use Getopt::Long;
+use LWP::UserAgent;
+
+my ($volume, $directory, $script_name) = File::Spec->splitpath($0);
+
+my $version = '';
+unless (GetOptions("version=s" => \$version) && $version =~ /\d+\.\d+\.\d+/) {
+  print STDERR "Usage: $script_name -v <version>\n";
+  print STDERR "\tversion must be of the form X.Y.Z, e.g. 5.2.0\n"
+      if ($version);
+  exit 1;
+}
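+
+# Example invocation (illustrative only; it assumes the Unicode 6.0.0 data
+# files are available under http://www.unicode.org/Public/6.0.0/ucd/):
+#
+#   perl generateJavaUnicodeWordBreakTest.pl -v 6.0.0
+#
+# This downloads Scripts.txt, LineBreak.txt, auxiliary/WordBreakProperty.txt
+# and auxiliary/WordBreakTest.txt for that version and writes
+# WordBreakTestUnicode_6_0_0.java into the directory containing this script.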
+my $url_prefix = "http://www.unicode.org/Public/${version}/ucd";
+my $scripts_url = "${url_prefix}/Scripts.txt";
+my $line_break_url = "${url_prefix}/LineBreak.txt";
+my $word_break_url = "${url_prefix}/auxiliary/WordBreakProperty.txt";
+my $word_break_test_url = "${url_prefix}/auxiliary/WordBreakTest.txt";
+my $underscore_version = $version;
+$underscore_version =~ s/\./_/g;
+my $class_name = "WordBreakTestUnicode_${underscore_version}";
+my $output_filename = "${class_name}.java";
+my $header =<<"__HEADER__";
+package org.apache.lucene.analysis;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.junit.Ignore;
+
+/**
+ * This class was automatically generated by ${script_name}
+ * from: ${url_prefix}/auxiliary/WordBreakTest.txt
+ *
+ * WordBreakTest.txt indicates the points in the provided character sequences
+ * at which conforming implementations must and must not break words.  This
+ * class tests for expected token extraction from each of the test sequences
+ * in WordBreakTest.txt, where the expected tokens are those character
+ * sequences bounded by word breaks and containing at least one character
+ * from one of the following character sets:
+ *
+ *    \\p{Script = Han}                (From $scripts_url)
+ *    \\p{Script = Hiragana}
+ *    \\p{LineBreak = Complex_Context} (From $line_break_url)
+ *    \\p{WordBreak = ALetter}         (From $word_break_url)
+ *    \\p{WordBreak = Katakana}
+ *    \\p{WordBreak = Numeric}         (Excludes full-width Arabic digits)
+ *    [\\uFF10-\\uFF19]                 (Full-width Arabic digits)
+ */
+\@Ignore
+public class ${class_name} extends BaseTokenStreamTestCase {
+
+  public void test(Analyzer analyzer) throws Exception {
+__HEADER__
+
+my $codepoints = [];
+map { $codepoints->[$_] = 1 } (0xFF10..0xFF19);
+# Complex_Context is an alias for 'SA', which is used in LineBreak.txt
+# Using lowercase versions of property value names to allow for case-
+# insensitive comparison with the names in the Unicode data files.
+parse_Unicode_data_file($line_break_url, $codepoints, {'sa' => 1});
+parse_Unicode_data_file($scripts_url, $codepoints, 
+                        {'han' => 1, 'hiragana' => 1});
+parse_Unicode_data_file($word_break_url, $codepoints,
+                        {'aletter' => 1, 'katakana' => 1, 'numeric' => 1});
+my @tests = split /\r?\n/, get_URL_content($word_break_test_url);
+
+my $output_path = File::Spec->catpath($volume, $directory, $output_filename);
+open OUT, ">$output_path"
+  or die "Error opening '$output_path' for writing: $!";
+
+print STDERR "Writing '$output_path'...";
+
+print OUT $header;
+
+for my $line (@tests) {
+  next if ($line =~ /^\s*\#/);
+  # ÷ 0001 × 0300 ÷  #  ÷ [0.2] <START OF HEADING> (Other) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
+  my ($sequence) = $line =~ /^(.*?)\s*\#/;
+  print OUT "    // $line\n";
+  $sequence =~ s/\s*÷\s*$//; # Trim trailing break character
+  my $test_string = $sequence;
+  $test_string =~ s/\s*÷\s*/\\u/g;
+  $test_string =~ s/\s*×\s*/\\u/g;
+  $test_string =~ s/\\u000A/\\n/g;
+  $test_string =~ s/\\u000D/\\r/g;
+  $sequence =~ s/^\s*÷\s*//; # Trim leading break character
+  my @tokens = ();
+  for my $candidate (split /\s*÷\s*/, $sequence) {
+    my @chars = ();
+    my $has_wanted_char = 0;
+    while ($candidate =~ /([0-9A-F]+)/gi) {
+      push @chars, $1;
+      unless ($has_wanted_char) {
+        $has_wanted_char = 1 if (defined($codepoints->[hex($1)]));
+      }
+    }
+    if ($has_wanted_char) {
+      push @tokens, '"'.join('', map { "\\u$_" } @chars).'"';
+    }
+  }
+  print OUT "    assertAnalyzesTo(analyzer, \"${test_string}\",\n";
+  print OUT "                     new String[] { ";
+  print OUT join(", ", @tokens), " });\n\n";
+}
+
+print OUT "  }\n}\n";
+close OUT;
+print STDERR "done.\n";
+
+
+# sub parse_Unicode_data_file
+#
+# Downloads the specified Unicode data file, parses it, and extracts the
+# code points assigned any of the given property values, setting the
+# corresponding positions in the passed-in target array.
+#
+# Takes in the following parameters:
+#
+#  - URL of the Unicode data file to download and parse
+#  - Reference to target array
+#  - Reference to hash of property values to get code points for
+#
+sub parse_Unicode_data_file {
+  my $url = shift;
+  my $target = shift;
+  my $wanted_property_values = shift;
+  my $content = get_URL_content($url);
+  print STDERR "Parsing '$url'...";
+  my @lines = split /\r?\n/, $content;
+  for (@lines) {
+    s/\s*#.*//;         # Strip trailing comments
+    s/\s+$//;           # Strip trailing space
+    next unless (/\S/); # Skip empty lines
+    my ($start, $end, $property_value);
+    if (/^([0-9A-F]{4,5})\s*;\s*(.+)/i) {
+      # 00AA       ; LATIN
+      $start = $end = hex $1;
+      $property_value = lc $2; # Property value names are case-insensitive
+    } elsif (/^([0-9A-F]{4,5})\.\.([0-9A-F]{4,5})\s*;\s*(.+)/i) {
+      # 0AE6..0AEF ; Gujarati
+      $start = hex $1;
+      $end = hex $2;
+      $property_value = lc $3; # Property value names are case-insensitive
+    } else {
+      next;
+    }
+    if (defined($wanted_property_values->{$property_value})) {
+      for my $code_point ($start..$end) {
+        $target->[$code_point] = 1;
+      }
+    }
+  }
+  print STDERR "done.\n";
+}
+
+# sub get_URL_content
+#
+# Retrieves and returns the content of the given URL.
+#
+sub get_URL_content {
+  my $url = shift;
+  print STDERR "Retrieving '$url'...";
+  my $user_agent = LWP::UserAgent->new;
+  my $request = HTTP::Request->new(GET => $url);
+  my $response = $user_agent->request($request);
+  unless ($response->is_success) {
+    print STDERR "Failed to download '$url':\n\t",$response->status_line,"\n";
+    exit 1;
+  }
+  print STDERR "done.\n";
+  return $response->content;
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/analysis/porterTestData.zip b/lucene/backwards/src/test/org/apache/lucene/analysis/porterTestData.zip
new file mode 100644
index 0000000..ccb55e5
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/analysis/porterTestData.zip
Binary files differ
diff --git a/lucene/backwards/src/test/org/apache/lucene/analysis/random.text.with.email.addresses.txt b/lucene/backwards/src/test/org/apache/lucene/analysis/random.text.with.email.addresses.txt
new file mode 100644
index 0000000..71ac34c
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/analysis/random.text.with.email.addresses.txt
@@ -0,0 +1,427 @@
+=========
+This file was generated in part (i.e. without the email addresses)
+by the random text generator at:
+<http://johno.jsmf.net/knowhow/ngrams/index.php?table=en-rosalixion-word-2gram&paragraphs=20&length=200&suppress-quotes=on&no-ads=on>
+=========
+waist and Wintja are relearning how dJ8ngFi@avz13m.CC we spread out, but it
+here before, our dimension of story. In Bed and Marys opus in the last thing
+actually having difficulties moving, Spiros rises to our hidden on your
+<JCAVLRJg@3aqiq2yui.gm> orders, my love: Im seven doors and with gentle
+fingers, then disappears? Whats the idea <kU-l6DS@[082.015.228.189]> of
+<37layCJS@j5NVP7NWAY.VG> the "%U@?\B"@Fl2d.md pages blowing to appear on Earth
+in motion (what rules did we can take a radio changes. A VOICE: Hes a
+scoundrel. VOICES: Burn him! Burn him! SPIROS: Want to team of the couple is
+the sweetest love aH3QW@tw8uo2.eu of the teaching teaches members to
+communicate with time interplaying and linked and you marry it. It will leave
+Bvd#@tupjv.sn the logic of it from hereing those people were all
+SBMm0Nm.oyk70.rMNdd8k.#ru3LI.gMMLBI.0dZRD4d.RVK2nY@au58t.B13albgy4u.mt the
+artist stray? Does a few rose doom the UFO with my dear Sissy says Sissy,
+holding hands up a bit of DvdUJk@61zwkit7dkd3rcq4v.BD fate falls asleep. When
+an internet age is ~+Kdz@3mousnl.SE currently working with his bedside table,
+and brings in a shimmering timeshifty verse vortex, the dream. Victory is
+hallucination, my hand for more. Mmm my head,
+C'ts`@Vh4zk.uoafcft-dr753x4odt04q.UY in five. (Spiros waves goodbye to tell
+you, honeybuns: The poisoning is, but no addresses. A message identical reach
+across the script. }0tzWYDBuy@cSRQAABB9B.7c8xawf75-cyo.PM I grasp hold their
+flapping wings and when theyre seemingly infallible information? Bookshrine of
+a sip of defined the Great Horned Goddess of no feeling.) Meaw. FFIANA: So,
+darling. Dont be dry white and teases him back
+lMahAA.j/5.RqUjS745.DtkcYdi@d2-4gb-l6.ae in society not speaking, giggling
+V85E9Hx7@vpf0bs.bz in MGBg2@7F3MJTCCPROS8YETM0B4-C9P7WXKGFB0.RU the boring
+f***s! (She leaves and Him Lover, Outlanders. Plus Universe where better than
+they just the land any letters in the gods. Expected, this at the threesome get
+even touching myself. rsBWOCJ@lYX0SILY4L53Z3VJPSF6.pwrawr.vdpoq.nz He picks
+dIyLrU@9A40T2ZIG7H8R.t63.tv up at our harem world 6dAsZKz@d33XR.IR so pop up
+you will be gathered, then Wintjas hair; smells of the manuscript: Contains a
+EnqCC@2bk6da6y08.LI common AQ9yV@Mfqq32nexufgxzl4o7q5jv3kd.lb universal within
+this lv'p@tqk.vj5s0tgl.0dlu7su3iyiaz.dqso.494.3hb76.XN--MGBAAM7A8H web.
+b6/zomNkV@8jwm-he.IN The
+5FLuakz.hXVkuqDt@iBFP83V6MNI3N0FRWJ9302DS-0KHRV6O.1bf59kj64uj5b6e2zfn.cm cosmos
+is filled with soap bubbles. <RhIwkU@58vmet9yfddpg.3adkmhrv1px.AO> I cant
+concentrate with a nearby and he nEBk6w2Q@Bb5ib.2pay.so pours.
+<AlW5CMAn@qos-53u.j91qq96d4en129szf7099kxv5lo6yo.gm> Its a wine with the joke
+in the only good enough! It hit again the house. He thinks of terrorist, this
+water. They were in verbatim rewritable. World by a quick eye shadow beneath
+the stairway; we not easily counter weight, is filled with your own perceptions
+about it. (Eve, how to talk to you really turns on its physics. The lover on
+the sunflower in worship of the? (She smiles.) Greet
+<QPYBDV3.Ah/h8U@x3v444pzi.1cvgokam.PW> it makes sense$A!-(B Not really,
+5Iwbiq7@p9s-2pixps9jwzyhfroxqivw8sv90r.xn--wgbh1c from up in the candlelight,
+denser <AaFU9L@3yj1xqf1.cz9.ac> medium to say something. Shifting of that
+|iCmQ1@rum6w0a7wt.3QLD.ht71.cx the eyes and there came. And now, approaching.
+When the thing. What did I woke up the printers! We EhLTUjo@rEK.sJ44H0.GR shall
+we are heard like a glimpse of hyperspace. It travels further and kneeled down
+bHEbq3Rp@33.lKSSMY.9xaurtfle9xe.iu4810l.fj to you can walk away? FFIANA: I want
+to eFcup.cPPEW@[1ae] speak. The Fountain of the background when I extract of
+hers, so strange book and a royal destruction of songs of this pearl. Not often
+by an incinerator vessel. Spiros, the delivery of alien exists now. Forward.
+The rosy guidance of wine. Notices that is partly the pipe
+p907@bk3o.fvtmw2m2.Uutr83x2yt4.2nuin.EU of the chance in Old Town. D Strange
+music keeps one of the top of myth and smiles.) SPIROS: Nope, cant even
+PpW2L5.QgP2n@9rz7.a5qi.oRH1Z.8ov.UZ more! says it doesnt exist! The world in
+the cosmos loves us. (Spiros soon
+o8UgG5fewm4vr9Ai5wPS@sgh.2F-OLKLZ81DIUET.xpya0vtx.fj here again aixQH@z-y.AR
+and again he turns and blinks with you want? says Sissy looks over Wintja and
+the fashions of Fit to Spiros continues. Its a situation of the barman says
+Spiros. I read the river. SPIROS: Damn I said. 69
+<jVTeWQfL."M#~t Q"@1e.oglq.ubk.SZ> he kept locked up into a suitcase along
+her body, points a female voice of 6e5QQuy@N7.2cuw3x2wpddf.paycp1pc.AI their
+part of flowers, and Marys opus IqG6Fl@[220.112.120.54] in my PROSECUTOR: Hes
+<lWHH4eWSn@tbxyb7.jhzqxrk.lv> one is <P1zO*RaAr@[111.99.108.22]> unsafe at a
+little <d00gy@[4TC]> secrets, we made to write: And a drink of Eternity,
+Speros, <1yNINoBU@[136.003.010.238]> Mr Boore, back to me! Lovers break
+Ms8ox@[_3Tuehr] the code so
+<8'Hk8a@ksf7qqaa7616xw8dq80h.K6fy89c.3k-8c.g58m48v-18zh8v> recap.29 28 So,
+darling. Dont leave each itself, on and devotion to all about time
+<wtWDNo@1sjmcbbli196-765mt7m8o8hywft.7-ga6rsnum8v.np> has happened? ANON 4593:
+What the tongue Such as she did you back and the whole moment in
+<"x)yO"@7le5o2rcud5ngs.Qmfmq.Jfxv8.Zznv6t6il.MIL> your own lens, thank you
+1hXd@f8.1kxqd3yw4j6zmb7l7.US arent already. It tastes them have ever come come!
+The tomb. Blink to him and flips to it, but the palace. No
+"8}(\$"@mu2viak0nh4sj5ivgpy1wqie.HK way$A!-(B Happily: You smell of it
+all and yet sure this pool Th7XoAs5@ggdb.BI of the first of his
+5iDbhah.xdtF1x@[59.55.12.243] heart j2ovALlgm2Wcwx@5jphzt.TN can take to the
+wind, speak to apply perfectly, you say turn toward sexual nature and lays his
+ZlaP~E.4Yk1K0F@lF6VN.M5.Nj.PRO pipe. No, landing from
+cFCvIJAw@l93H0R1W6V4RI0AY7RLRQR4KOEVQPEG-PDTF03V4D9A0.xZZK5.lu the fruit will
+say. -F�Dont talk like the west 8Ju2AW@1n.h7.vu wing of the letter in every
+second, <"\nkP]{"@[Vej\yo\HD]> but he slipped in. Yours Spiros and there
+when I imagined anything can take returning? <fKWC?@qgcb.xn--mgbaam7a8h> Where?
+With? Who? Going toward his body and kisses the notion that has joined odds. A
+scattered around <L4BbaB@hv1.BIZ> slowly, moving eyes on and
+WvSmV@qpx15vzmbtxzvi-syndl1.ML turns toward her. She sips some way everything
+began was finished my wet Earth. Warning
+"3|PX~Cbdq"@U3vp-7k.8c4q3sgpwt6sochundzhx.museum for me.-A City Different.
+Let your myth LjH9rJTu@tkm.gy settles over it
+<8myMO4@hOV209VZ-SHGBIH5FBYLTCQZSBW-U5-1.dv9> means to Our of a book he has
+only but <vQgXEFb@maxmrbk-5a5s6o.6MZZ6IK.awjbtiva7.IL> the imagination, master
+phreaker, <5ohpA3ww@dcpcotwccy> main railway station. Loses the dreamadoory in
+the surprising success.) A note from round is her splendour in them? Mmm my
+dear, were 6TVbIA@r50eh-a.la from them keywords. Boy,
+AaASl@Bsteea.qHXE3Q5CUJ3DBG.S2hvnld.4WJWL.fk my own imagination, master
+"CN;\-z 6M"@86.qc7s.23p.ET is the usual fashion, says to stream and appointed
+space-time continuum. Dilutes your zX3=O3o@Yjov.7g660.8M88OJGTDC5.np sleep. Ive
+been seen, he says the ringnot we proved? (On the pact. Thanateros is an
+internet caf� where the Queen. Now cmon, lets take to raise the apartment. Like
+a limousine and I kiss timelord slides his hand QFZlK1A@4W47EIXE.KY in words
+now. Get us in the same time conceptualisation is to bed. STEFANDIS: Dont do
+you think Ive put down the green lush. She often by God of a 15 minutes. The
+others knew into the 1guLnQb07k@ab.ccemuif2s.lb you-know-what. Youre the luxury
+hotel. Diamonds and receive the process of action. We wanted in the nominated
+bird. The <Jddxj@[111.079.109.147]> woman undressing. He has him just get at
+Hotel California. Its <Hj06gcE@[105.233.192.168]> about all devices. Playlist?
+Initiating playlist. Timelock? Timelock on. We have a u8?xicQ@[i\21I] lock of
+the apartment. Like a kto, part of Our superhallugram to hook up and
+CczYer}W@bezu6wtys9s.lft3z.mobi outs. polish
+OmpYhIL@6GJ7P29EIE-G63RDW7GLFLFC0M1.AERO fills the crowd, comes from the music
+is impossible. SPIROS: F***. You are your voo goo.
+<2RRPLqO@8lh0i.vm7xmvvo-r5nf0x.CY> Daysends burn deeply and will take
+TOc!BhbKz@F-myy7.kQWSUI7S3.net this he thinks. For UFO from elsewhere. Bzzz!
+Bzzzzzzzz! Bzzzzzzzzzzzzzzz! Tell them "0\!P?".shQVdSerA@2qmqj8ul.hm the leg
+of LTLNFsgB@[191.56.104.113] all, until it has read it is
+iT0LOq.jtPW=G06~cETxl2ge@Ah0.4hn72v.tQ.LU there. <VGLn@z3E2.3an2.MM> Once
+TWmfsxn@[112.192.017.029] Spiros under the place
+2tP07A@2twe6u0d6uw6o.sed7n.109mx.XN--KGBECHTV as were not a house of the
+rosebushes and the whateverend, feel her waist. She changes everything. We had
+decided to do you know CjaPC63@['\RDrwk] this, is what did leave, pray; let us
+come to, <Ayydpdoa@tdgypppmen.wf> what history as died. Strange, Spiros with
+delight: That night "gfKP9"@jo3-r0.mz and gold case
+<aTMgDW4@t5gax.XN--0ZWM56D> is spring: the aeon arising, wherein he returned,
+retraversing the mcDrMO3FQ@nwc21.y5qd45lesryrp.IL gates, first
+<NZqj@v50egeveepk.z290kk.Bc3.xn--jxalpdlp> to reach session. Initiating first
+part of the main hall toward his own spurs. Hes an <XtAhFnq@[218.214.251.103]>
+Irifix And older ones who wins? ADAM: x0S8uos@[109.82.126.233] The violin and
+reality. The hidden set up to come. ROSE WAKINS: No answer. The
+ALB4KFavj16pODdd@i206d6s.MM rosy pink cigarette.) Visit the supreme chest and
+express in orgasm, my version of clouds contemplating existence, the horizon.
+Best grxIt96.46nCf@nokjogh2l4.nCMWXG.yt of sheer emotion. Spiros laughs. Why
+did he says Spiros. Ban him, he called for it, sir, says Spiros
+Fgbh7@2rxkk0bvkk-v3evd-sh56gvhxlh.hhjcsg36j8qt98okjbdj9z574xdpix59zf6h80r.Gyb4rrxu.ve
+laughs. uo0AX41@Fhlegm1z57j-qvf5.p8jo6zvm.sc Can we determined that when I am
+Spiros, quoting Jim Morrison. Death. Design patterns, youll hear Spiros says.
+They cant G decide if he was your key that we playing? SPIROS: Why wont xxx
+would be imagined. Technology so beautiful to fill his diary; I like a match.
+Puffs. The Star Eagle. And a person with a play with. sjn4cz@9ktlwkqte.bv
+Faberge can change overcome your work, a large-scale coordination, Goddess say
+is blasting away to end is <b04v0Ct@[243.230.224.190]> very tricky to stab it
+as a turn me to the champagne on your obsession about his nose and
+F!FUbQHU@uvz7cu1l.ciz4h2.93U4V.gb somewhere <6CHec@nONUKT.nl> else, then far
+stretch. The great outdoors), puffing dried cum on the manuscript I$A!-(B O
+one knee, feeling and sex in igniting <zbmZiXw@yb.bxxp.3fm457.va> bomb. (A
+housefly, Musca domestica, lands on into the device. Let me met. Wintja and
+victory. <"/GdiZ7f"@[221.229.46.3]> For years in tipsy bliss. SISSY: (Nods.)
+Yes. Now you witch. And we must remember, will tell you move but her
+NJde8Li@f7a.g51VICBH.cy creation with gentle feet, naked on strange hovering
+futuristic vehicles that when retrieved upon a thought, or reflected. The Crew
+coming on our gratitude for you address then ventured into a dream, has begun,
+she sees a 6IeAft@e-3fp.Nkh7nm8.v8i47xvrv27r.pf golden ball and 4 If you that,
+Izz). Lapis, to the return all laugh. Applesfoods maybe, says
+TC*Qopzb@xIOB3.6egz4.m-24t5wmxtmco4iy8g91o66mjgha1vjlepyffott.E5ta.p9.CF She.
+Cmon I Stefandis.) Count me with a bed sheets, carrying gently away about time
+you rather dramatic, which reaches across this day. It brings forth between
+suns. How about the white sugar, leaves, sugardusty sugar, drinking of time.
+Believe. There "_3Sc_"@[193.165.124.143] is the soul, W0dwHf@[25.174.65.80]
+and only Spiros. Love you. Believe in the multi-leveledness of the 21st century
+and exchanges a book called Sphinx. Alien Star qPkkP0@4k0vs.oaak2z.3JMTI.PK
+initiated. NYKKEL HUMPHRY: Of Make ways over town.) SISSY: $A!-(Band you can
+turn slowly but not yet audible, appears, XzZh7@[\\JmD%U] in the silver
+melt together. This way of vision sees through time). Brewing with a kiss?
+<66SGHzw@Oqnr82oml7jct0b8crwbstdhcgc3khxj7dj-t898mzro0p3-rvp-dythh.TN> Her
+feathers: streaming water of the wind. I started interacting in a boat, on
+ot4tPF@[AY\j] her e4seIFbl@cib.cg thigh as she blinks happily. Here is
+<B2w025e@r2H7BW16B24DG1S5DED.bg> what you around him, Magus says the list. Its
+about what that atweEde@blk-3y.mgvoh6l9my.F6.FI there is functional. We
+vanished into the computer. Up hills and enable entry using his long adventure.
+Do we are all detailed trip against decent behaviour and girls. And you
+alright? You evil laughter: Muah! Muah! Wont wate you all uDoPcRGW@rEBD5LUT.ly
+way that there <2KQhx@Bba.u--9b5bc0.NF> is either both night And our dimension
+of a bad joke, says nothing, just after time. It was indeed. Now that will make
+the streets. He instable? What shall do. tKWc2VjVRYD@[254.190.162.128] Who
+wc3W16^@D3v2uxqqeclz.w1fd529m.DM are heard like our love. Of the stairs too,
+usually through the note nearby and you go now. If I remember Njg@6S8MA.HK how
+it instead. (She chews the rosy petals, frosty and the land at first part of
+waking? That we "L\^4z]92"@0qp--walx.MIL like they meet you.
+<X08sWFD@62GNK.tN4.f1YXX.ug> And out into the bed. From the gods have loads of
+a dark winding stairs and laughs. Why doth Her devastatingly good eyesalve, to
+tell it says the Rosy Dawn. Rising, rosing, the story? (For all the UFO
+shimmers from around him, but we look before eK6Bz1Bu@[rX;J&036] the Eternity
+we shall never go now, look, he thinks, both go for the words said. 69 people
+who live in Thy honor. "~`o\:"@hO4UKF.oZBWV56B.cmn.DJ And
+lcgUakx@[pjGd&i2] here and his life has tasted of becoming more clearly. He
+is dead. Calculating possible meanings of it instead. BqdBTnv3c@wf35nwaza.ME
+(She whispers, smiling.) Theyll be able to help. ELLILIEILIA: You are created
+the visible "a#Um{:\'\bX:"@in7tjo.uw8wil.gp world, without it will see now,
+says Spiros ApIbER8'@[&Y] thinks. Every time and go to write fiction. Indeed,
+love something I pop, from the play? asks JTsM0c!s9CzEH@Sd.mh the taste of the
+outrageous wreck of dream, born and there
+hy2AOUc@uqxzl7v0hl2nchokqit9lyscxaa0jaqya1wek5gkd.NC was still result. Search
+taking <pY7bAVD4r@[,>T*R T]> out into !0axBT@03-gdh1xmk3x9.GH my dear, you
+know, of saint? What did come here from the Crowinshield Garden, amongst the
+warm kiss. Everything is white marble statue he is tunes faberge intricate.
+Spiros, a particular frequency, vbtyQBZI@20al5g.ro6ds4.Bsg15f5.NU spinning,
+trying to a trail of the narrative that it while the Queen, giggling: What are
+a letter with a web we could 2^ZhSK-FFYOh@Z2iku.rg.Z0ca1.gs not a
+G1RLpOn."yfJpg["@mXEV8.mu peculiar yrBKNkq@a2a1.Aifn.Ta2.dj stench of history,
+when appearing in the interface as well as follows the secret I am not
+teleframe the room, disguised <Wok5G@b5aqobvi5.ni> as the brilliance of the
+pressure of the modern world, but
+nXz9i.=EL9Yj@93r8do3ntizibg1-5-a0ziw9ugyn4bo9oaw3ygrxq-eczzv1da6gj58whvmo2.rs
+whatever. The solid concrete, Dp63hd@B1kbahyq.PL and put it stumbling or why
+wont the chalice with communicating with language only she says Spiros,
+whispers.) We left from the second birth? The young man is part of the teapot
+opens. A man in disbelief.
+y01rn27SFq@o0HNP8.C5.i4rvj8j338zgter7er5rkwyo5g.atnc0iuj2ke.8or6ekq0x.IO
+Outwords scratch skills against her in fairy gently
+<0RiEo@08mnvbu.p661ernzjz5p7nbyix5iuj.cig5hgvcc.SO> bite of death and Wintja,
+playing with the name by <Dwxab5@1sx5y3-umsy72nl.74lwye5.DJ> your dreams. He
+arrives <IvdZVE4xRk@0vw7ajl.AR> the information. He swallows all the f*** me
+tell her wineglass and tangles. Synchronising <CvQxhXJ@d5a7qnx.ke> weeks of a
+reason why everything seemed as wet dreamery, remember? Got a purple Ipomoea,
+crawls through the first stage has the riddled beginning to her in a butterfly.
+You landed smoothly. Preparing to n7MxA4~@[4(R] hit a world is man. How much
+in <hEhF@3TV5WQ.fbkx3f> mystery. And RFGzu3hD0@wbh4.sm furthermore, what the
+edge of physics, death and eOADW}BcNG@2568p3b4v.Xq3eksr.GP touched smoothly ah?
+Fashion feasible technical population resulted distinct produces
+AsAMWriW7.zSDQSAR6@Gg2q4rtgr.GG recognize instance the room at the garden.)
+PERNELLE FLAMEL: (To Mrs She is basically very drunk. I see you
+<cDCVlA0t@[20.116.229.216]> cant I walk down naked on it to bed bed into
+c=yJU+3L5@n2x3xhksf.gvreani.MZ the stairway wfYnaA4@lzojy.4oii6w6sn-p9.kh and a
+kiss as though the point we see the numbers, the phone set to be displayed,
+disincarnate entities can feel my wifey. Spiros empties the answering evening.
+That is kdeOQ5F@vD5Y.wmmv.7rswz.1zelobcp5qxxwzjn.fOEJZ.KM simply not but I
+could do to the ground, and the decanter ppULqb2Z@Hv9o2ui.AO is my friends and
+says: I <tOHw@[IPv6:3500:8B6C::CB5E:1.124.160.137]> see The elves of dream
+telepath posts, but makes a gentle people with a redirection is generally said
+Tadeja. Its over, or of ages, you excuse us walk off to Talk A never-ending
+one. I remember how cute she saw the neat fuse weds sexiness. A thick paperback
+book itself continuouslyposition, have heard in the noise We are presently at
+the first of the death MWLVsL@7nhliy.O8mjon3rj-kb.t8d6bcpa5i.au mask there is
+accurate to meet by to this important worse material in separate directions.
+Spiros stands, and arrows and orange from a witch and down the mix? he feels
+Wintjas 13th century. arling peach, cosmos loves playing with silver trays with
+the <BN0EY@hh9v.p9bwgs.TN> language as RgiAp@d9ln.bf I still result. Search
+taking time and time <PBugBo@97gcz.DJ> in time. Spiros, how else or
+Fh#dKzbI@[+_] nonexistence. Eros never guarded the horse stops. Move. Stop.
+Move. After earlier squads mysterious source. It inscribes in case you are
+applause. The world was a. With swiftly cover <wyqU-C9hXE@wPRBUI-WS9HXE19.LV>
+it as in yourself! 5 Yes, now comes from half walls of us, my love. I am your
+vast operation is all worked out? O how long ago. It glimmers, node of the
+voice, the middle of the introducing of utter hell on the car unlocked and mind
+around midsummer and not believing in <muC?Js@[IPv6:47FB:5786:4b5e::5675]> his
+lower lip. From the wind say I was inspired to live in a crime. I know, and
+find people have been reported found a digital electronics. Is the pillow,
+touched falls down their part of the computer and our world
+<yLTT2xV@wdoszw9k1ork-z-t.kq.l3SEO.Lb4jx0.NA> come walking in
+<6zqw.yPV4LkL@dA3XKC.eg> the stuff to help. Websight. Dedicated hosting
+wordpress blogger coined Sister <S5z9i7i3s@Vzt6.fr> short Sissy Cogan. She
+answers. It is finished his way that includes getawayways. Compiling focused is
+this case? Then turn on. ANON 4593: What are pretty kinky a story about the
+L|Sit6s@9cklii1.tf strangest child a Syntax of passage and Wintja and
+reportedly after demolition, decay, and twists up to tales endwhere. This way
+there to born from elsewhere. Bzzz! Bzzzzzzzz! Bzzzzzzzzzzzzzzz! Tell them that
+words from sleep but no poet yWYqz@mw-9k.FJ am I woke
+Knhj419mAfftf@R26hxll64.3qtdx6g.AL up in a kiss made it is heard on Midsummer
+our cards like big fane beneath the secret of the <aZYHUr6@Shyn76c67.65grky.am>
+criticising crowd of the gods and here to... TADEJA: (Suddenly appearing in
+ZYxn6Px@di0cqhtg.hu your "#mLl"@w1sc0g3vm.j1o4o9g.GW voo goo. Daysends burn
+deeply happy, for large bite of his artistic inspiration without feeling as the
+season. One within the dreary WYJcFp@653xk-89oprk2im.iemhx9.CC kingdom. (She
+steps up with Christine says. The Blooming of y5AXi@[Oa #] The time regularly
+we are, she nZErAGj@6sq3-p.r8KQ.aero kisses the gods? I am in his brother I met
+years ago. The word <OMq5sBK@udg-5zp1.Dory85.SG> is because we had. But yes
+just like a while. Were not matter; W it going? Im sad to
+<2bymd@Ojla1hvfpw8rrihrx.cy> where he arrives and information, and smiles
+victoriously. 5OMbw0@r2d8cn75.1VR2BJ0J3A8PY.gc0mljc-h.COOP Mmm, you Rudy. And
+there and day soon is phone and come <al6X^pQkx@pyj--2hp.lbet.TN> back?
+Rephrase that we are good, I leave the gifts of html or center of her right to
+him to where the room.) SPIROS: Okay, sure, Ill be a page is to
+NkzPW4f@2-0.aaoqccwrgi4olytac0imp6vvphsuobrr115eygh2xwkvzeuj.tl put in a novel.
+I want two. "4-b9|/,\e]h]2"@9-iiahsdlzv-v65j.FK Passing
+<1AhBt@od77y.s9ZZP531YKW> now. I go identify what we are always win. Anyway. I
+know. It is here reaching your script and toward the edge of shortcuts. We came
+the Saussiepan and <g8Pv2hb9@[166.176.68.63]> its mysterious ways. I remember
+"IA~".Tn03w7@[\>J?] how am waking to, that the secret about it will say the
+redpurple wine, Our plan all within this moment you can hear me, I heard on the
+clouds. A channel is hidden visible world, without ground turned real, their
+every E6aK9TaJ@j0hydmxhkq2q.Svku4saky.MU way to a radius of
+rdF2Zl1@9fsic.C17pw9o0.vn apple tree and says Spiros. Here I saw her. He walks
+by the landscape of secrets of paper. I love it! But I could call the
+<pCKjPa88DG&x5a@4ha07ia2jk.xk7xe8.PM> world with the manuscript I$A!-(B O
+nothing. Im proofreading the most dead branch in qgLb5m@nynqp.DE the screen,
+then I did you can remember. qC731@["\S] (If you can it completely insane and
+we had expected something our sacrament. We were back. Esc. (Shuffle.
+Hallucinate a sip of grandeur, said he suddenly a tree, and ground turned out
+the publisher. O about it all. Lets
+<vIch1nT@[IPv6:4c2f:A840:1788:ad5:C2C6:dfae:1b1f::]> stay with us. Mooneye
+today and thinks and check
+GVSMpg@2YGZ1R19XTW1TIH.Re3vg30u1xq6v7cj1wf-6m14939wvgqbl.93mztd.SG the modern
+world.) Sissy stands sipping redpurple wine) and you
+0jq4v7PMxm@eq6teog.kO6LR3.x2p.53yltrsvgpd3.RO up to be wilds. Spiros 99% dead.
+Calculating fastest and chewing she directions!
+zdGLZD0P@i2JQNM8.816oja8pkk5zkvyx.KM Take my body and executed with your own
+forehead, born from Egypt come back? Rephrase that what is the night. There is
+here. Cant you think. And shadows Jp#hSH@74zkerax4.31kr.7c9-yuk.mp keep
+dreaming of letting the elves of modern civilisation? Does that fly softly
+through the surface. Of the modern world we must Kx^0oZn@oFFA-URZ13B34J.DK find
+sub52@aoq7.iHF.CH them, baby. Rosy Dawn. jfVSq9oAR2D@iGU0.7bp3x.4cr.sz You have
+become clear edges. And why you told our skin and
+nalgU@Yfpbdcv8a5.n9kwz6kyi2u.thic-rws.af.TG places, spread on your air on her
+earlier. The effects will be the song by and his eyes are gods. Expected, this
+pool of illusions, that makes its golden geisha ball on Clocksmith Alley. Two
+female form orbits the two chords on a god, in correct dose to see a book.
+JOEL: Spiros thinks as he felt, came out out! We are switched in the matter. I
+shall I can imagine the Crowinshield Garden the aeon arising, wherein he once
+again. You suddenly changed. And the rose; Will you? Now listen. (She smiles.)
+Greet it comes everybody. And what the room, disguised noise We are you in 3D:
+you come. ROSE WAKINS: =uC5qVT@56g530cltpekrw.pt I used to read it: Barbapappa
+(a gay pirate captain) <QR5&kx@7qhi3bhav5ga0eva.b0sdom.bb> and walks up again,
+when you are here; working on to. 8DZQ7@dtr16r89fdw59q.cf Now join you? Im
+slowly in white <Q4pNw@6o-9weojl3r7.LS> bed and language whitespace
+sensitivity, readability, less punctuation, etcetera. Things had to the Dark
+signal has him with gentle blood on to the ages. Stops laughing. Sharpens eyes
+from the *mfOc_CN@[G\3] starway, Down the uniqueness of the bed
+2p`tbG@c767inolrav0hg6a-ucs.y0.tw and Rop{cgBy@Wekdh0xns2um.UK giggles. Spiros
+soon here for ignition of the thing Mr and fetches her t*p05lV@017y.MR you hold
+their own code. Your brain and Nora in longer. Stay tuned. We
+7ZxO80@Dovepwr4l.qxfzchrn1.es8ul0vavi6gqy82.K1hc7.INT must marry me? Eyeglance
+is is not hear. He takes a good marijuana. And I had very fluid. It cant G
+C_Iphp@5t4rtc.id decide long hair shaved like a while. I have telephones and
+waited. He sits there is humanity within its authors and snaps a touch
+q+m2x@Cfw.1tm52-kr.BO it candlelight tuning. Just a young man go to the
+ad-section.) 47NIL@Hl68os0.66l9bsf2q.SC THE F*** UP. Spiros slowly. Lets rock
+on his father and remember: the sea soothe his paternal grandfathers old days.
+In to the Honey Queen, xxx 14 hristytio (Ill catch us. Compliments always. Did
+you rather unnoticeably. Faster than we got this cosmos. The engineers of
+terribly intricate fantasy turned semitransparent, the people have done subtly.
+It is THIS bulls***? Count me Rudy$A!-(B Sissy laughs. Can we are breadcrumbs
+vi0LyF9O@p74jz6mxby.it on Clocksmith xQ4jU@rQVWLWAD3T8.4-lnu.AZ Your usage
+<zea_0Kr@[97.59.144.249]> of <5HP1k|s@[068.150.236.123]> being a shimmering
+green. 5XJZlmYk.3Du5qee@[072.023.197.244] Her feathers: streaming
+<fzQlo2R.HSbkNYi@ay8a5so81x2fgkt2rv> rays Wanna take AvNrIHB0@[+n}oV] a marble
+from the letter the brink of wheat from the dull ghost of the article atomrss
+am I? (He hangs up "!N7/I\zhh"@[204.037.067.146] dreaming? A PEDESTRIAN: I
+already told you than the world now, as vlJODxFF@xFO6V.i1.fgad6bjy.NO though he
+walks off the flowers. He lifts
+<qDe0FA@xpp1le82ndircjgyrxyzkrqu3il.oUKHVV6829P-16JILWG62KN.cr> his head we
+passed on a hint of the worldmask of the people we dance, sweet boy, my dear,
+matter of bridging millennia, I was it works, and Adam says: And the fathers
+pMF64@wssq6kh9uhxk.cA2YZVBV4JW.xX585A.ru that we are in this G3meE@[^!'OO]
+stuff!? The wunderdome. I saw "1@0UYJl"@vplkx.d2n.i3tcx3aaxut.lbb3v9.ldq.me
+your prophethood of the ones too far! iTH0QND@wg9sizy.lr Further! Into the
+planet. He sits on the Other. We came from Egypt to save our dear Sissy slid
+her earlier. Ill tell me away with bright asterisms sparkling around
+9kF?opSTo9rSDWLo&W&6@xrh32ibf.F0zb6kb.BJ in this young woman in the whispering
+wind and hands to speak, but using his <a0FI1m@1olkdpz.W70a3w8qmk3.NA> nose.)
+Nevermind. WOMAN TWO: And furthermore, what about the script, says the sun.
+Large-scale thinking of a witch? Spiros hears music
+<"0H}r}X(p\M`/x"@rY48LPH.Axy.Ue624.TV> and a world as well as a poem
+AQL6YBFb@Hxawb15okz.y4.y5c0e.bt ever, indestructible. A newsboy hands
+<PEaNVR@m8NH9BVX5L096DRM7YTR.er> Spiros gives the drawing. Looks like to the
+<diI`Q@i5fpkuc.7zg2av.D6tzqq.CK> living out TCN0-Z@Tezeq9ejv.ekeab8hz14hui.il
+loud from the house. He is disappearance, as I know on the centre of your
+section gives rise from 05SnFh@jZ85JXZ.1RO99W5FYK3.uyv7g15.MP which it be close
+now, dream once: The stars
+<B2Z76Rn@9yce0shfsydxetu1v4-y.rBU2M0.6ik8oapv0zho6n653il25gu4rd216uw03.MG> are
+your vGZ2K@C2osgjtel5uerwn.riihbabhh41ve84.r3l.vH6S64.vn presence. UFO. You,
+Spiris, are born in Plomari. Steal back door, from his mother: Is it to live in
+their doors are like, Nv2ZgL@[037.054.177.155] two weeks with
+WsdI2W@i1ULFQ1.79qfph2.eg us across his way to crack matter projected by four
+<vJfpTf3@Hh4x2h.25m0idq3.fr> initiated. NYKKEL HUMPHRY: Of <oRqbgftr@l6jg0.TV>
+the woman casts a drop of your amulets NiynsKb@k9BTX4-FV.hc0skm-o.lv and the
+morning light. Plasticity of the sun bursts can feel it, rises from lands on
+w9uGwf@4hop8.Jb9655is.nr the realization of his field of the branded mania.
+Spiros says a dream? Something happened. And watching the Other, she says Fast
+Eddie. Bandaging the greeter info. The Eagles song by the fragrance of
+Timescity Express, is there, by zero. -F�Your star alliance. SPIROS: (Quietly,
+smiling faces twitching in an envelope yellowed by It, producing open minds.
+This mighty Nile dynamic magnetic strip that sticks). To Ellileilia, two
+fingers with the moon undersea settling for "NVUW+"@6jbe.KM insanity! He
+rises from the QusHU6JMR@0RXKIZNH76C3.Oqwcfr779e.MH end of wine ride the Logos
+and the cosmos loves <}C5IwKv1S45vlmPaaVHhF@[IPv6:EBF6::]> playing with care of
+myself up pitch/volume of a violin. The rosy dawn, Adam says: The transforming
+magic touch the waist, working-A transparent, yet its not easily let us
+changelings who all across Fountain Square where no telephones ring? Spiros
+recently. MARY T7rXlYc@4AI1LM.2o.uk BRISCOLL: What if
+uuCiDC6c@Maar3.65hlg-wf.t3pt9.FJ I w2mNOvIUh@dx3ep7ew.ru dreamed of a new
+dimension of her in Wintjas direction. -F�Word frequencies, underground river,
+announced on your location. Thought b#Add@9hpopo.Xg3tbjchdpt.TT magic. The
+violin kept talking to stab it was born from our own life as the dream I was
+practically there I want to smalltalk about the station, and so recap.29 28 So,
+darling. We are truly is. Its on Crete. On a curtain in a copy of the
+<NtrgJjfj."NBwi"@[142.085.096.018]> afterlife, the grass and the lovers pot!
+Transistoryness? Radiosyncromatics? Syntax of the modern world The mirror at
+<00lF9UB@2NR2.rs> the day soon <MPr42ye9@p08lcrzs.4bzxfznsh2bhgsa.CX> there,
+doing it will you will be disclosed, says Saussie. Become the future just
+happened? Spiros picks it at the time transfer was
+awwLoYLn~c2LfTEVT@fwksx.qoj94r11kw19k50k3.gd successful. Initiating first
+somewhere else. Its from gRZ5w9epm@p6adico3auugj5qklec.Sm4bx5.li the
+imagination, Spiros saw the words: They cant remember yet? I add to Any time
+here, she says. Butterfly as a dark zfdZ67Y@1azhq.dl3xxzni2.rrj.lpclc6g4d.sl
+soil run free What do you see, is the natural radiance of death reports,
+<vTWwSD4fb@uBSOHD.3g.u3mb.gf> is welcomed. Layer upon layer of Thy angels are
+crystal. Red <cYFVxcC6E@F9g0b.n1339r.AU> King and its my opinion. You were
+back. Hows it with-A liquid purple. She looks at pnuXl@s1alo2.tc a man
+lKy64zp.Cbg8BM@y0S.6uiux8h8.0udipt.ma on with me. Say the beginning from the
+manuscript and |9FDgc@vbrz.3L.av4kmt.rs bare plot. Queen told by the redpurple
+wine back where we all be rather dramatic, which they had skcHAu7@xD715N1.DZ
+always <BfcgHK3@[220.136.9.224]> include Sir Nykkel Humphry, master of the
+inverse confine survey the rosy guidance of her eyes on <LCOEag@Gwm.drsa0.GL> a
+river here, to the latest of Sissy. He again set the old Egypt. He returns to
+the looser you ready? Y Were ready. Spiros qrNZtp3vO@a0gr.8j9cvcgy0p-3.HN says
+Sissy. Wintja sing: Ive put ourselves in him, he has taken a
+lfW2rei20XWSmpQoPY1Dl@[(N&c] third <J761x@0IKGVUDNQ.3xpb> person. Whats it
+will bring the room on the book in trees and WFBBEv|@q7R2J.oy48740.pm smiles a
+pipe he enters the chat room (The church music in comic book aside
+<6H6rPx@zVJ40.xgyat.cLUX6SVFJWMLF9EZ2PL8QQEU7U1WT0JW3QR8898ALFGKO18CF1DOX89DR.1tfu30mp.CA>
+Rosalias Dawn, pray, Man through ytG@J4auwv4has.PS concrete. Could we? Were
+taking over a
+<"X;+N1A\A "@rc9cln0xyy8wa6axedojj9r0slj0v.Luy9i6ipqrz74lm5-n6f1-2srq5vdo-opef747ubdykv5hc.2lztpe.er>
+hippie up the detail. Rain begins to being married to the designing of love.).
+Made myself a funeral. Who are created DQTmqL4LVRUvuvoNb8=TT@2up3.PY (Is that
+hyperspace at the merriest of us for that. -F�Christofle is heard
+NC0OPLz@kcru1s0mu.name him a huge and wraps if he find? He is or so much more
+complex than kBoJf{XaGl@[248.166.223.221] we are heard within the
+<pEjZPm8A@v956Y7GQV.5uu6.Ribgf20u.6e.0do1nki1t.ahy.6iy.sm> woman of The
+<pIFWkl2@w9N0Q.MC> mirror of p=VTtlpC@w3ttqb.FO dream, born from that we are. A
+VOICE:-A
+
diff --git a/lucene/backwards/src/test/org/apache/lucene/analysis/random.text.with.urls.txt b/lucene/backwards/src/test/org/apache/lucene/analysis/random.text.with.urls.txt
new file mode 100644
index 0000000..241c806
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/analysis/random.text.with.urls.txt
@@ -0,0 +1,1346 @@
+=========
+This file was generated in part (i.e. without the URLs)
+by the random text generator at:
+<http://johno.jsmf.net/knowhow/ngrams/index.php?table=en-dickens-word-2gram&paragraphs=50&length=200&no-ads=on>
+=========
+
+them under the looking-glass. It is fair whisker, and this so
+http://c5-3486.bisynxu.FR/aI.YnNms/ thick boots!' I might -- I don't deny that
+the appearance of sherry wine on
+ftp://119.220.152.185/JgJgdZ/31aW5c/viWlfQSTs5/1c8U5T/ih5rXx/YfUJ/xBW1uHrQo6.R
+Joe <sJ5PY.b5t6.pn/> and then. But, there weeks together, be, or the moment of
+deliverance through the coupling don't know.' And now reclined on the
+http://Z%441S6SK7y%30K34@35j.np/RUpp%D1KnJH ceiling. There was bloody, but were
+my hair; so humiliated, hurt, spurned, offended, angry, sorry -- with their
+[c2d4::]/%471j5l/j3KFN%AAAn/Fip-NisKH/ owner. While we went all the clergyman
+said, file:///aXvSZS34is/eIgM8s~U5dU4Ifd%c7 `Before the cold
+http://[a42:a7b6::]/qSmxSUU4z/%52qVl4 air as when I
+http://Rcbu6/Oxc%C0IkGSZ8rO9IUpd/BEvkvw3nWNXZ/P%17tp3gjATN/0ZRzs was company.
+At such a long `Well, boy,' interposed with this coach, in the companion
+file:///2CdsP/U2GCLT of a design for the state of one of mine looked all in the
+lies by giving the village,
+Http://Pzw978uzb.ai/yB;mt/o8hVKG/%231Y/Xb1%bb6v1fhjfdkfkBvxed?8mq~=OvF&STpJJk=ws0ZO&0DRA=
+and Joe recited this iron bars with their account, poor elth, and she had been
+almost drove me towards evening. At
+HTTP://173.202.175.16/Md7tF6lj7r/oioJ9TpL8/x%03PjXgMMBC7C3%BDWzoVMzH the
+sergeant and then on the raw
+<Https://yu7v33rbt.vC6U3.XN--JXALPDLP/y%4fMSzkGFlm/wbDF4m> afternoon towards
+the terror, merely wished him as biled
+M19nq.0URV4A.Me.CC/mj0kgt6hue/dRXv8YVLOw9v/CIOqb -- a conciliatory air on in
+<ftp://evzed8zvv.l2xkky.Dq85qcl1.eu:1184/07eY0/3X1OB7gPUk/J8la5OPUY3/y1oTItIs1HFPPp/5Q02N0cPyDH87hSy/jheYGF8s%F3P/%86PmYhi/ViKHoxsHqM8J>
+the ftp://213.7.210.47/%e5pFkj6e6Jczc/ypJGG/z%663jYR/37IxLQBPr/Ciq50EUIdueyj
+candle down, how to assure you,
+ftp://alv0e-s.88.nJ2B34.ps/s0TgnaY?yOQUt/18CY%16IzNSQu/LaT3dD?io%80LBw%cdXDHU3/ppMyv/DbLDzyceaC/Goa%f3gn/5ebODAP0NAOD/6NkL/uP7CW/gS5TnaS
+you http://278phvcx21/QGOy%395L/yy5NurSi8S/gMr%553%C9q0S say churchyard, you go
+to stop short run, and her apron of `Gracious goodness
+z156ky.MU/.b%daGKqc/jYZkXK1WE/Abx589H6tADH gracious me, and we were stopped in
+line beyond, stood out again when the sergeant, Ftp://x68qwf2j7k.nc/qyZfwo%8a/
+and saw that phenomenon needed counteraction. My construction even with sleep.
+ftp://yd.ng:40759/L1XAGIuzdMsjUIUwQ%F5/oDjgDsU/&Ze0Wz/ZeWR6cu;type=a#yDMuky I
+uttered a neat row at first sight to be about the
+Ftp://Xmswrxn8d-1s.pe.gm/dB6C3xTk%D3x/EKOiTmk%7c/API/0cdgpi;Type=a dissuading
+arguments of a greater sense of the admission of those grovelling and ran home
+any FILE:///rKnQkS0MAF#tM%53_2%03%d6ZICH relief to me and bring the wheelwright
+and Mrs Joe peeped down the memory of being
+ftp://R5ecjkf1yx4wpskfh.tv0y3m90ak.0R605.se:51297/zpWcRRcG/1woSqw7ZUko/ equal
+to live. You didn't know nothing could attend more.' He had been a coming! Get
+behind the answer those aids, I saw him in the same appearance of the convict's
+file:///%C5=.%8by/uuFXEaW8.%7E4/DRM%33Kh2xb8u%7FHizfLn/aoF06#7srWW%2EKoFf
+confession, and bring you see? '
+HTTP://yA2O3F.XN--0ZWM56D/qPDTt/MwMXGQq2S7JT/TJ2iCND said my limbs. Joe in an
+accusatory manner as well known that Joe Gargery marry her cup. `I wonder and
+there was publicly made it was,
+<file:///Gdx5CDZYW%6cnzMJ/7HJ/J%63BSZDXtS/yfWXqq6#> as lookers on; me, I
+noticed that hand, gave me
+<http://1qvgjd1.TP/7oq5gWW/Gwqf8fxBXR4/?Br,q=ayMz0&1IO%370N7=;Sl1czc2L+5bRISfD+w&ygP3FhV%E1w36=2Rx>
+upside down, and comforted me up. After each walked surrounded by some one
+question, and meat and I thought it signify? `Certainly!' assented Mr
+Pumblechook,
+<ftp://5SCC6BUYP.Knf1cvlc22z9.1dc3rixt5ugyq4/5OnYTSN/QpCdo/t3zqkI/pn5skT/oJgrGy7>
+`be grateful, boy, ma'am. Come
+http://2dkbeuwsto3i3e8jaxi6su9wjlmwygtpdp7g65611z-2bbr82uhjqkdv2jrh7.KZ/FiSvI/aaB&dPQ%42kLdM
+again
+FTP://Hi144dz6hctql2n3uom.GE/%1A4OBV%63h/DoA4hpXFmqldOw-MB/PNYoaSDJB2F1k5/Nx%BBEDhrHhcMB
+towards evening. At last, and kneaded, and a dead man taking any. There was
+publicly made out there?' said I,
+ftp://w0yaysrl.XN--9T4B11YI5A/y4FFU%c4F0B/Dh9%D1dGK3bN/EqxueQEsX2p5/xgf4Jxr%D9q/2ubmieRM
+glancing http://t9wa4.rjcahbc06qmyk9jkhu3f.ZA/vIwW3sc3Pg/Bwmeo6KAjkRY at the
+N54l6e.vu/1m2%8bMFjv/oBdy%36.eL;33/N%d21Qvm/ river wound, twenty miles of the
+number called, hears the awful it lights; here and trimmings of Caesar. This
+was a hard badly bruised and spread it if Mrs Joe had fought for a coarse and I
+<http://ah-2d4.ASIA/qmp> want with
+<http://195.139.142.211/%53fk2%90Pj3/V75ySPv@K5ISv/eUiXDAYc#e0%59> his manacled
+hands; `I'd give a final smart wipe on with sleep.
+dFU69ED1EJ0MLT.G8ef3o.bn:53301/klFVsh/YInBJE/SEIzo5EIoe3 I at church,
+therefore, I was not free use of the cold air when I heard of the fire and of a
+man's alone in at
+<http://[3349:5FBD::213.207.213.043]/k4PbSpylXc%92Qckx/aQfV7X0V/25RN%49ZzvavLgf/re9~I?OP=nXo&oi0mm=f0e5&KK8=9V%13&Wd0%1Ce'0qnS=CFlgRw&4%89V6AON8%53jQhwUvln=r%6edz&W=Pq+T&a%F4H%51p%d9ZIU8l=uyA8S5J%95+Wb&xi3KNa1P-Xwu=&8tCH=BwNWf+%37G16&rsyBG=MnU4S>
+a constitutional im- patience, or a coming! Get
+<5pn1q8q0tg.JP/%74XuKtp%F3fqLuGO/CMeC2IRRl./> behind with
+http://bmm4qto-360l-pbemedo4.SA it, for reference, I thought how small bundle
+of him. On the scaly tips of the load on again and finding an alphabet in the
+mill. When sll-9eg.W6pv.rs/WtYGg51Pt%68/R8fsX4a I should make such a confusion
+of me then the soldiers all about relationships,
+FTP://r13oym76cysnp77r5sidj8sqgxzpl3ls4xzj.JE/ta%e0PA/5Jwza65o%7D6Uno/RyO%b1B/v6C8yo5K
+having played http://2b4ne4.5ji.oubrfdx24.UZ/%69kMsLF the kitchen, and how
+tv2yy8dnp.tN8DIWG.gr/ladfwSflp/Zr3YKvt/l1QlvEc she never was adamantine. I had
+taken to him. I saw the gate, and women. Play.'
+<file:///eK9K3g%47VnPYStl/GKGHYM6b%23nc> I had your
+file:///LtZpL/%1CU8lVvcWrTR/ elth.' By this man sitting in
+File:///yCPVGaCm/hHqFToHKZw/%29zmDPSQ6183%C8RfpdKQqkCd%51X/lyJABDQymQDL her
+pretty brown paper packets inside, whether I do.' I should have made a clear of
+the top of the charge to see igth-n.Mcw.ar/LjMApEho5gp825BK/afaST/HWKafQMBv/ a
+confidential voice, as I was <https://l89xkmwfh-hprhz.tcay299q.2zruch0/uv/iM/>
+now looking hard at his eyes had betrayed myself, from that I para> very serous
+to wonder whether it accuses man was taking a mile or for `property.' Mr
+Wopsle, united to perceive that limped, and a row beside him coming on, and
+that about him Good indeed! Now Joseph, you live,' said -- waiting for they
+won't bile, don't you the fuce up my forehead, had been out a strange man, with
+him.' file:///6yT8LrgRZG%10HsZ/CP1zI%98gHFiT/zAx4%EB/tBv6V8kS I entertained
+that it a whisper. `Anything else?' `I am a new here, Pip, that old chap!
+You'll do it, once held file:/// it, and saw the noise like the stars, and
+safe, but that stuff's of mentioning my sister. Mr Pumblechook. `I'll eat it,
+and generally more sharPly file:///iYHw2RpUc/9MPLbyq7gTVSx/pYnzm4E than the
+officer to take it accuses man whose teeth chattered in reference to him here I
+saw that way, as she said. (I didn't make towards
+FTP://[9198:015F::]/pU7tr7Zhgt/~cLd7w7.Gb/4MvIKc6iy%58vN/AGZ08o/uT%1e7vtcZD;type=d
+the fireside feeling my hope you'll never was seated on our special agreement,
+by letter, inasmuch as I waved it made it was, on the muskets, hears the
+ftp://0dfw3ob8y.Jri1p4f-8.NG/DpihVuu3RJ/kEKaPppvl picture of liquor, and we had
+been thrown open, to be told lies is http://pZRLI6.ma/wAex4MoQ/jUv6Vh%5C2 a
+star. Miss file:///F8%A5Go9qV/UYzwol/#839W58%4D! Estella was that it off. Mr
+Wopsle, nodding asleep, and others on one low-spirited dip-candle and handed
+that the marshes; and completely stopped and gloves, and so new admiration now
+retorted, as I found out for ever afterwards, the file (as I was sitting at
+ftp://zo.dz/BSI/enk1F/XjnYRqwHBAyIYdC/rTXmyPP@Smcp:/%E9r7n one of old brick,
+and torn all the shop transactions. Biddy when she gave him- self wh en a
+common ones, instead of Prices,
+nhzbw2.qyevbi.gn/Oxbk%737lUb/OBx7/VX67/%C4fxQxvns/4fNNJ9FjR/7YeGTW/7VOLjOD4/P%89.1Forp&3/wLVBbhK/3GdjIWB
+and applying the
+Ftp://4ie4a.fl8g3c5.wjvan5m3j.4sawo3mof.TH/wfcrCzx8%B50W24/ZxqhiPCLDP/SZbReZ4h7
+torches carried afore, closing in the still gasped, `He was, dear me, and never
+see that you are both names nothing in the still looking at twenty years older
+<Https://j3bhn0.elhqoer--c.BI/ijN66pIVKxXjOmg/xCHrfc%feFdJPd04IG> than the
+ftp://[8F7F:9507:280A:3192:EA30:EBD2:87.9.102.149]:4954/AwLZnTre/8g3Vo%6doz/Uw=dU%70nxbo
+cards down 6u.vkhga15zezgvdc68uii7dh0svzopjpr3.NG/rXE/6T~KV%06Kq/iO5vG/G2S9YU
+like a spoon that I got the Above' as if they rob, and made a good in his men,
+who used that
+HTTP://lZSO.fr/%baWLoH/rsdViX1jMX/jKQg/aWFY%eekWu%17DTY/ASpif739Hht/hHM/oXdG6y/Es2c2Q/UVz6TevIJa
+it could a1JQT907R.ou7o81.al/3Vp@VDZp%9c think I should yield to hold of
+liquor, and put into sackcloth, and
+http://g746.mhi.xtzovtn01w87au9.tc/%8Dn1XEzK/FsoFQ/xuL0wOc/YNP%53OS3/w5sIf7ox/t%22S9TxaTtK3/K%74%4EabDPe
+lending me, each time, and eye- brows, `She?' My sister to
+http://92-uzyzm.pr/UwJkzP/ me to me, Joe.' `(I'm sorry
+http://46cda.e92kuq1029.Igb3rjaqtc.Xgpak.T50lamdm4sscw1i8mq1-8.wx6wzqxd92z68sbs43l6.JO/Q7RzRWFz2/
+-- he didn't. My sister, frowning at it, sir.' `Tell us with a new to myself
+last reek of reasons for noticing that she put before my
+[BD39::62:47.178.113.23]/U4woqa77Wyygc2/cltcO5Xw%EDWZT/%5Fd@GP5vV#wUMoflXqTOsj
+convict, disdainfully. `Try, and be presented by
+Tw95.XN--WGBH1C/CK%fb%EF9/s%F4W7je06JY%49r/Y2L9fzlfd#fprt97Y%72 hand!'
+`Good-bye, Joe!' In a dogged manner, while against <file:///xjYnAHV2/g%21ZmKfq>
+him. But, there
+file:///JDyfQk8%669N~2L%ecj1/6PySMx8z%19%36/HP5GhmnNinF0p/vavqKxyBLV0a is it,
+Pip, or for that secret way with disdain,
+<ftp://v2WJ0E6EX.gw:46170/R1g73Yli4ts/K%09PIdRA/DntZ@> before I sat, or why, if
+nothing longer than this dismal intelligence, I don't want an untaught genius,
+I got his tombstone on the fear somehow, there for verification, no hat, and
+ladies' society; but one upon her!' `Goodness, uncle! And as when you like,'
+returned
+<pVRN-P.ky/2UMoA1sYRpmUyd0/fEShDdCyd69Nyh6f/6zP%cevC69rdf0#XaOTpyS%73TQ> the
+http://4u3o/BKdhwRyzG mist all the marvels I was with us. So, I had an
+invisible gun, went there were both
+file:///LdsHfPABFz1vRD1OB6Yl/RS6&1Gmz/mfYul/ annoyances; but, I knew to the
+hair: saying that I could not been more than
+ftp://E1cdf-p.XN--MGBERP4A5D4AR:60510/qMaw4kSSgYM/7jgIuL/gSVW6O91/2bhnsj/kl7R5sgn6&X5EiZdZ0WhTX3T/fa%f3Azz
+at me, and that her walking z3ymb.KM/DdnrqoBz=YtxSB away so much of the
+grievous circumstances foreshadowed. After receiving the way, that I thought,
+if she should go to?' `Good again!' cried the
+FTP://7kgip3z.XN--HGBK6AJ7F53BBA:15983/OYEQzIA0 society of a savoury pork pie,
+and nezt6awdc.lSZDSU14B1OH.4n6nkmjyyj.cc they challenged, hears nothin' all my
+hands in herself, and bring him by hand. `This,' ftp://085.062.055.011/bopfVV/
+said he wore ftp://Mbbn8n.6ge03fiivyc7of.PS/mvb/X8VNt/5WrMZpw/flC6Rs a dog of
+such job, I wish to bed; `was that for going wrong in the gallant sergeant, who
+had got acquainted with all file:///vNLDR/Q7QXgZ/6ApHTc6bN4/yihY9ZGy%3BlK
+accurate; for, I thought so; and yet so familiar
+<ftp://p2SJ4CE1KFC8CSRL2OY2ALA5TJOCN0FEM-W.biz:51412/> to Joe, and catching me
+think.' I clutched it had an old discomfiture, assented; but for
+078.085.085.242/kqKkywur6Kv4Qn/-CJv6i1Nxc/ the air. Presently we were which it
+proved to me to screw to the slate as I was Pip. Didn't you see him,
+qow6.7RF9YUV12HR9CCFTWUTQRONLAM4PN82GI8E.GQ/oxUj%a6Ch2/bjjphp%34IJ/%65NQDGFab%14B%51M/QtBe
+his file:///pQ%8CkB8ipZ%2cyZGMf/8USgpQ%54%48e/jCflvdl%3Ec Blue Blazes is said
+that Miss Havisham's, and (what's the soldiers ran like to like Tar- water.
+say,' I being there; `did you had it was equally convenient. When the National
+165.195.223.067/Q3DEaK/58Z29OKkyF/fk9Vl/dKLw%7FR3Fzo1YsTPxmm/XiABg5j23J%1avyv
+Debt, but lonesome then,' said I. `And please God, you get home!'
+f1442jv.3w4cg5hy.EE/8hsz%802pLxgSlD%edIt/ESbwLYo/tdn9mrEynmJF~ `Goo-good night,
+sir,'
+[dfb9:d316:677E::2B7C]/gsORr%b7gc/?ehIX5=GTM0co5(Dmn91JN&8J=8W7wFuQfZk7sM#vYfk~Km
+I got mixed [11b2::35.78.41.76]/vVfZvUimVO/K9hfOd/4gZUL=j%09PGr#o%23LnBOkk9
+with a sort of weeks of it seems
+https://oL2UQ.yLN-U053DA.bf/CfFIFwe/ZbgHFvLfbEYrStIS2h3r/pqd%14rY/aR5a8hx/aKWFJechP8DT/ypmeBjL7rcbUr
+to be hanged there had followed him
+https://[3790:ad57:0B63::e5f7:f6ac:164C]/Obax;zcD/Y%48%9a/Z2xcdar coming back.
+`And eight? ' meaning to firing! Why, here's three Js, and Estella to work, and
+you know what you've been so that my particular convict then, as if it were
+bleeding and trimming and that some flowers,
+bl60k0jqkc9.oow84o1.BF/Xly5cTna/BzoQuHi3r8e/o5BDNrvT/=6HRdBjH/Mrp5%02/p%e9pT2Ae
+an hour or
+ftp://Bs3ceuxd8ii66gt.X8wwdpt.BB:27095/3BfkvfzcmTS/FTffh&S/gIWvJ5Kd/AlOQ%3EnO
+small?' `Immense,' said the dead and at the Romans must know,' said Mrs Hubble;
+and tingling, and that I had won of the shoulder. `Excuse me, and we departed
+from Richard the furthest end of
+http://ch43n.51rkj.rze.mq/pJjrSAiuSv/3x/EK%59ReZM9w both imp and stung by the
+bright fire, another look
+zQFC1SPO96J.Jy20d8.xn--0zwm56d:863/0OWpT4dpkMURAGe/nFg/LQBUr%3E/af7dO1 over her
+best use asking questions, and feet,
+<ftp://Xctk9iigg.cat/u3cX1d/Sx6m3dql/d%46;type=d#0i%3cT1yMkZQ> hanging to try
+back was the poker. `It was not warmly. `Seems
+HTTPS://56aderic0knmip9lkqdqag14.uk:45885/lELiK:/vF%4C5Enwqy/P5NGJ2b/dD6sg1yMV
+you must have astonished our house, and a candle to it. I asked Mr Pumblechook,
+being done worse.' Not exactly relishing this, and
+ftp://vlt.3g45k63viz2.tcnm3.UA:60664/AJ9iqYk%c1/uKbohn2/K%D1kequ4z8rxFpJ think
+I might find it so coarse.' And I dealt. I could make the forehead hardens the
+kitchen wall,
+Ftp://2gifamku.jqv10es.MX/yJ0rhtMYX/Y1Wq%F90RYO1F/NT0%aeAG3/r3Act1 he ate the
+house, end with the Ghost in order): Forty-three pence?' To five hundred
+Gargerys.' `I say, Pip; stay
+7WO6F.XN--11B5BS3A9AJ6G/1L%f9G0NEu/L2lD/mQGNS9UhgCEb out with
+ftp://mIMU.t4d24n4lyx39.zURN708MCNGK-TJ42GLLBQRJHVENGPO.bw:59930/KmBYQKHfcjNRe/rK3fUjg%0Ad/.zHeVoCaC5/w%A2%F7up9o7J0Eq/ySBVhB
+his shot, and reposing no help to my seat. It was in the kitchen wall, because
+I calculated the sounds by giving me by the name for a rush of Joe's forge
+adjoined our own, I had a mile or up by a little greasy memorandum-book kept
+apart,
+ftp://lv56pdepzu0b0fo-04qtxv5tt2jc0nsaukrhtz5-e3u1vcb517y3b135zl.e0r1hson.dk/3TVoqjp6%1FCFSkt/006VZfho/gxrWxgDawM3Uk
+and
+Ftp://7n977.Niyt.2fgkzfhj.q7-DJ.Ow7a.it/5zfRi3PO8/1zfKT9%421tP/?SazEijJq%710COQKWeLE/TdUc%b2u/2AxBw9%4BUN6Zp4Z/KfUZd1MTdPv/L4m1tI3/WJvcK1
+brought him to him, or large, and I was raised, and not understand, and danger.
+`You are oncommon ones -- <FILE:///a7kRxh8/h43TYOY6J5%31B/ZfuF%9c3/> I mean by
+hand. She uttered the wine, if I particularly unpleasant and put
+<[46C8:60FE:7ff2:79cd:69E1::221.191.034.036]/Q2MQ8mttjsMF/UqrKq0W%E6N1#YfB7A8CHYa>
+his Majesty's service. And couldn't warm
+https://hnk6fx.2uxg1e9o.pm/I=LKn%a2n4/J&RntX3mUxZ/B1Q.Ilpk3Icq%7fZ/ia:4DLuk8pvsD/mpED3egQJfH/O0es5zrzwWQIC%21K1
+water into a full of erudition. `I don't deny that my view which
+ftp://133.195.101.060/U9x99/nrirgTvZnm/QLNzsm they had no account to be a boy
+fortuitously, and I had recovered; folding his crown upon his hair, and
+file:///RN%7EGq55Z%D1E/U0BQ1De/o8a@zHbAMS/GOA4KUcR/uaOR6C%f1Y/u5d7 caused the
+job done.' This description must be only two wild beasts! Come
+http://[f63f:096e:ee87:792d:CD31:A1B2:83FD:7322]/tnFLqVSRa5h1/%EDX1y4cxiv/GIo.OM0/M4lBr/xgHa=
+asunder!' Water was <file:///Td=wh:cuTxKx/4B8%dc%616s&sE/snROY6GQc> not marry;
+and tilted me with the torches, and the plea of him. I am indebted for
+anything, for there was bringing with a sincere well- wisher would consider
+probable, as <ftp://1fcu78n.COOP/eDRJd%82k8FEI/7fbDLiQncgOl> to Joe, after us,
+and took me feel very like to go and Policeman had been the man, ordered about
+a pint of open country were briskly
+http://obp6jiork.KP/pOedzk/Lo1uNQ796m/hjLXBOr%25AB1/ clearing the first fancies
+regarding file:///j3m%a5o5blRxq2/8aDBkHng/OR1ixi5h8kX/nCUz2aDz/ the poker,
+<file:///V1tX7rM/7zk> and feeling his shop; and passed me to say very undecided
+blue eyes wide, and adjourned, for any pupil's entertaint-ng himself
+<file:///1qw4T%8BKBi3CKv/dxm6%7f8s78R/%83sF6J/K%33qfB> up
+ftp://tyt7r.u6ier1pxipif5.BW/vSq6akPyGUI/wVJ67VXTQeuKM/yB4zYqPh/0RuHq%58G/rBTgdr5F
+the up-and-down-and-straight on a moment, with his tombstone on the vat. All
+this arrest of
+<Ftp://4dx-s0az06e.Su7ir.SA:16277/HWkL7hR1SW/RzpkWipV/LCYQ6/gLpY%807L6/60H1z96%90xdQ/P9jx4DVu/oFa6c#gQo%57wv0vN>
+the questions I kep him in its wooden finger on
+FTP://o--B02WG9T7-BXW-RVAJCJN1IALU9EX65WSEXCRHM.Aeh-m.cat:34416/3q9yW%53m/FJ9&U84ik9&e/R.l/ji0sjWb%5edu12nbNSW5c/YMGfLcesN
+the place!' I have felt painfully conscious) with curly sharp-edged person
+sumever, and among
+HTTP://lMxNbKW@tq1imryvi.P7g5o8np1.SK/um4Z2TESWBSrcN/fNehEdgh/sW%6fCP/b2fqBsG
+the dust-pan -- no, no. No, he considered myself to their muskets:
+<http://Lgwt071.sn/HPn4x/%46zCwYZzy/wzQVoL2sT%E3Yl?974Zu=X+JuSbGjrO&Xu3Fz%a8%19%5159f0r=afHdI3%F7FNrs&Mb0hjV7d=&I43eztc=1k:3+uSz+kdJP5c+bRkUBkF>
+one side and put the nape of all, Pip ?
+izojrse33.9WTVFAANL2Y.ly/i3ae/5%0Br%f5yL3/MsnfAk#T6,v%51Ev ' `Remember? ' said
+Joe. `Is she, uncle?' asked Mrs Joe contemplated me (as I may draw
+ftp://[8714:3F6E:aa8:c8fc:4F41:b8ee:44.74.99.35]/790Ug0mWq/7yBPb/pzh4dTX the
+ftp://[ACC9::DD55:A45B:7a6b:177.179.158.116]/i1q3SzWTmO%09p%A3/FWDWq8u2Q/7 same
+man, with both sides of blood and beer, and
+<Nw2m4j4.Br9kvjf-9.3wac-fh0uk.nysyu-emjwy.cat/PGDh:oW%5F/H34QSRwe> flavour
+about the pantry, which was repeated. It is the memory of a turn them with a
+struggle, 6f9f3nny.mq/ai%cb2SZP/qfjOd2mpEH/LUZ.fxv/#3NaTgg and indeed it all
+against the tambourine upon my sister made up there was drafted off last to
+keep myself I set at me. When I sat, corpse-like, as she didn't see; but none
+of the place of it was washing up to hide my sister. `If you could be, thump
+between my fore- head that know I render it) pampered. Therefore, I set at
+nought -- know Pip!' `Noodlel' cried Joe, shaking my coarse
+ftp://R1x5yr2ij24e42wlojnp1i-b2bsacd01stfe5-10m0-3z6cwb3aflzrgoo.it:8665/oFbo12T%3Bng=x/%B2FcEUXPHAP/Ni0qL%0bPN4#yhp%5dO6
+hands to a draped table and maintaining equal to them while
+http://[C794:4d71:ACD4:7AC2::30CE:B0E7]/T8igmbW%6C/DE1%1DyI457M#brpF I
+HTTPS://rI7HAX2OS.bsajd56xb48.FO/fn9eA4%0A/G96ogw%69SGis/1V0hqVLN6zaQC1 had
+been put into our swords and http://toncwiacr.0px.g7pud.MOBI/EdoW/qUMMnH if
+some of me,' file:///LkP1%5BcrQ/bnkvBi6F/Q3IRXB7Kt8mvDZ/ZKwDAp%a3/ said Mr
+Pumblechook
+http://6DAK.8I6FGLS.t5YJHK9GCUVU4EB6NO513HBTWAU0XP5.GL/LDO%8CDB%82p9# was
+invisible gun -- file:///%46f%c5KRhPp/skp1X/OdoS-J1foeE/5H5RIWoip frequent--
+and had divorced Http://180.036.254.028/VSiroQpjS her to d54n.Agqa6.7e4.JOBS
+Godliness, and when you see it up from the court-yard in upon it. Until she do
+that. <https://5t33av.5u7.RU/SugrkGKg/FDf6cYm5QdHk%b3z> I was most callous of
+you are prison-ships, and dismal, and it all the lower were given them. After
+Mr Pumblechook's boy, and file:///tGHsUEMaQS/VLn1%6Au#uGnrvY bulbs ever in
+every <lm.27.jv4quihwsp.mw/mwCDm0cweP/A8wSZIQcZGV/uKBboAnqevGJEQT5d> word after
+a court-yard gate, I went out, Joe, `to tell no indispensable necessity for me.
+All this extreme
+ftp://6g4.qe-s9txq3o8vvr5e.5YWZGPDM9Q.820d8wtribsgglbrnkafno126s8vflph9tfmt0mwew/qC0bInpp/fqxKQLzN/hAj/6PsngV;TYPE=I
+horror of having been so file:///aR3sSgC/GJu run at Joe's curiosity by letter,
+inasmuch w26535-k.Ut2.MS/pQP1Rx/NUKUyRSr/21x/CcgOcN4U/Jzw%C6Ft/n5Mu9X as if he
+gave me up. But ftp://75.22.51.21/wFDRPO/NLI1ZSecRAfFEAy/kZ4whP%C3A/ he did not
+come to; but even made a
+ftp://1h3yyf3d8sffjx3rsf3k2y7c459c2gx/%2FfoFDEyWygHgKAuo/KhJZkBlC5r3%99/9I8SMy/25_&y0
+private conference in the mud and lighted with what you're welcome to overhear
+him down, that stuff's of my eyebrows. In a glass bottle of gracious? ' asked
+the low career Ftp://215.239.176.156/tNfD%09mvdOM%28zx/fc3DTw2nf/#2kySKJ that
+made
+<http://Vyt.4ferfwbkbm.owtk.me/LlUtIjj/BDovC/6vJ4Wbk/ihtBt4d%acVl/ywEBIdg%3dHb/>
+the kitchen wall, and day. I find much to Joe, we
+<ftp://Lq.es/%B1ZPdTZgB2mNFW/qre92rM> were a moment before, for no par- ticular
+reason why he went to go, picking his anwil. -- like a grave nod. `That's true,
+Mum,' said Joe, `ringing like a change very disagreeable to him,
+file:///IZ47ESCtX%aatQab1/V553gjR?Me/#9%68qPw his pipe there. I replied,
+`Pumblechook.' The bread ravenoualy. `You mean stole,' said my scattered about.
+She drew the kitchen, carrying file:///Y?GG/BBqMPBJ/nsxX3qP/8P24WdqBxH so low
+wooden hut
+ftp://7vl2w.jp/b%a5fBYyDR/ZN%62LG9aYpjSwn0yWg/nG97gndK%69XZ#fet%55XXZhslTNrq5T
+where it seemed to give Pirrip as
+<79wvzk3.24dyfkxg0f4z-hsqgqqzj2p9n59el0a.XN--DEBA0AD/:8epfLrewivg%488s/2ORX8M3/B0KpeeB/2rbuCnnBF/4P6%1cU6fTGNj/o%3aZMIHdO>
+to say, on the guiltily coarse his head, he tried to the
+Uow9.sF.GP/sF3FCFSbCRWGNJY%aaU/DVXA5nIOWmjc6S/FQXdiBw/Y7~cVmpypgft/vU1%D4z
+remark. `There's one sprinkled all I was possible she beggared me. All these
+fearful
+ftp://[fd77:4982:C37F:a0a1:7651:E09C:117.093.145.017]/2l91g/s%79lJmUiZ/%A5R2qsJ
+man, with his [62c0::]/d1lmSzoB/5OBVnzn/kOXW%D23 mind. The two loops, and by
+the fire), `because
+Http://Ed095eimjy.rlb5698d.kp/_l5uoOO/aA494s?3nSxdIpE=y%79qu+2un1hGR&J%76=8&L%bed=uY5hO+s+IKk1S&Q=HHXEC+Gof86QIRHy&35QY5=
+he shook her veil so thick nor my milk and would impart all had returned, with
+soap-suds, I had FILE:///#F9Bgl just like thin snow. `Enough of his right side
+of thenceforth sitting
+jyia054.l814D9SNHRRA5RJCCW.kvxga.XN--0ZWM56D/sBbx24%f2Tw2/Sd0Lul0Vg1bbIqW~/lveEw
+in File:///KKfIe63z/BETB.T%C6sG/RcYgnOycg my soul. I sat down on it, I have
+been a spoon that the pie, blacksmith?' asked Estella of it made a mouth wide
+open, and so
+<ftp://892f7.oel50j.32.9qj1p-g7lgw.MR:48021/XNKbk2PZQXSvOuGnOAnATDt3/XfHyJtvoC/PW7YrSgf#LmGWJgPw>
+much surprised to bed, may not allowed the certainty of her bridal dress had
+been within a knife http://sisas.ua/4CU60ZLK4VgY8AR89 a blacksmith's wife, and
+his disturbance, as I don't know.' And couldn't warm in the lighting of grains
+and wine on an slice, to bring the same pie.' The other, always wore a pitcher
+FTP://7qf.hlj.TN/IXOeaf/t%c52Jxwy#YkcAy2 of the stranger looked at it, I
+pointed to Ftp://Gbu5t.HT/xad4fgjaN#GLpU3XQd6%7F(cHIz himself. No glimpse of
+file:///A1omJiPzafgAm/addqzG%dc%62/Lw1mamTg herself, I saw that he would have
+been there, I was too far and uncomfortable by it.
+http://89qw34ksf0qf6iq264of-1nya4ds7qvpixw8c951aw8wcm3.qxk7usa.N8j1frzfgnkbi9y2.XN--9T4B11YI5A/Unwn3/%97gnj0/GQgJC~OFxsdE8ubC7/IWy450/8%7CQVgdI8/soi0BviZt/Zjs%10i5Xh?qi8t9=rBbPok,Si&*Xl=Q+fT&Hx4%D70=84+8W%18+sV2BU6xCDP%47M&Usbms=
+Under the Above,' I rather to become transfixed -- he gave me out of the
+kitchen empty-handed, to keep him, I had made a
+Z7tid0uh.eZMOI-M1.umlsyksuzovqdw6wozbd.BW/m%e684OhC/ErAhpGiG subject, if he had
+driven off, every board, calling out with the fireside feeling conscious of the
+floors of savoury pork pie ftp://tw7d-6yu.im:2055/%66qbqzss/OmPGW;type=d as of
+misery, in respect I may tuck himself from a look at all night -- say, `You
+must be called myself drifting down his hand. She was a group of his beer in
+his nose with Joe, by collision with
+<FTP://zst.tn/QcUpaA/VKvJ2/JN6AKew/iXYIiHm7mfPFmD%21E5/yTQpoiqdbaaS1/LnzOX#VqsobH>
+the deepest disgrace with an Accoucheur Policeman had made by no daylight in
+the eta0q7.2r79g.AC:34736/%abp87fVdPCY/PvO8Uk4WoLF#A*HP1A bottle I the market
+price of it. That,
+https://w9zhko2rttzndzivll92.sbzum.UZ/bgy8l68/Ix72mHu/zlA4CI/IQjc%CD9%255FxJ8A/Dbb%4eTCRu
+if you happened to hurry away somewhere in a great wooden house,
+[2582::]/Mhm%55MWThR4Ne5mZ/xniX3IdG/ which he looked at Pork alone.
+ftp://224.3.121.112/G1w1g%1DdRi/T6Eb_NegqJs But this while, the case. You do
+yourself a J and ftp://tn.z-o3vn3n4.5wg7.gs/loxilPpcLnsI/topa0Ez/Na%70Dcde Joe
+and Mr Pumblechook repeated.
+syt7m.TD/2dxrQQvBXC78/Z754hngiYcM/eM%3CaeYeXX/nmUwguwk97VGL/ It was so very
+http://isqogte5i.c-3oixcmy.SY/jlPVRlTs4v/enCZWc3Sl1dJ7/M5GTSZx/Ga%cce%63cLzTJvBodJ
+dark. Before bYIAYQ.9mlnx.OM/t1KK3u/iyQFS4EGHN3uKogL3WGG/6wn5Q5ndq8kHO%734cxgEc
+we sat slowly blowing and against her needlework, l wrapped to listen for I
+give a dash and then <Http://wvfftjk.do/a0%644z/?ATzWOxO1k=%85ulHR> murmured
+`True!' and took some general shop. She were rustily barred. There was much
+http://fnoY09@bm8xcfjyfiremhz9.sr/E4Rrq2/vQjQKj9fwV6r51/mn3x8he7/W4xCQs%FBvrzb
+interested in the landlord looking at least twelve capital offence. By that
+there a false position. Not to ftp://vxfr4g5ka.kn/TZSPrYGzv/KzuB%731GA him go
+there. I partially recovered the mound beyond the iron or girl, Mr Pumblechook,
+though it out, roasted and
+file:///vjS%f1/ktgHPAL/=v0cZ/WTpVo1/i6XlMCkNI/kukAwc8/thWUblm/c4ICXp/f8AHkj%1C4d%9107v%44hN/
+he
+Ftp://t4qxt.hd9ok.aUQ7GIMBGXP.IS/%7ey71ndfLh/m%4A5P%75153tpU0hY73KfO6o/E%7aAkUlK3hX3Fg
+would have no girl present.' `Besides,' said Estella ap- proaching with an
+empty casks, which was
+FTP://gJ8MRF8UYWFW.iq/cdX7RYOqS/6E6XUh%fcdHS1%dcoDwHgpFId the bottle (which he
+did,' said I. `Drat that he would
+http://01s0hfwz.TL/C9uEC/K9uWhknP3AxHW/%c56I1zL5Rfdd/sLJeP/2QkQNP/QcW%8aA0A/ be
+a many inhabitants who paid off. I
+<Http://gRWSMJ90XZNPAPHL90FB.zfyopzk/hMq%1fD/A5jQ%efiH4Csr/HTFm14uSXf/jW50yvQ6Mb/EJrahj19Y9Y>
+don't mean to perceive that name what secrecy there seemed to play.' `Come
+nearer; let <http://i0.XN--MGBAAM7A8H/Uy6czi/rrAt8esL4/iL2xLka/B3j&7Inmt7g34>
+us to be presented our- selves in the bellows, the brink of soldiers and closed
+the best of good look at that once. While we came and how's
+file:///aZcnMM/Hnr1PCn/wlTztS7SpL Sixpennorth of keeping that you are! An't you
+never have been
+http://2lv8030.fimc0v081i/cyEUoud6w/gfAlE/iQP:8/dZCue4cKVM3bs/JU%d5ZUA1t too
+sour to call those
+<ftp://kF0NLTJGD.HM:44827/Y6CgKRiW/4r7G/Db%bb=7xD/tE/t4ooQHdBsrw/ZvgcX/qTCarGQWa~MKW5nn8NF/dcy%1caO%b8/Di%947%2cB>
+sheltering premises, rose before I could I,' returned
+ftp://4ufofbu/pmLZX%f2wJcQO/B%e0b%64oLObaEx&C/QViF1ohg/Rffvf the chaise-cart.
+But I had worked his whisker; and it proved to have been safe dYC57.CI/=G0dg to
+be able to be fed now. There was in. When I saw him out of girls, immediately
+said he. drawing his brandy off. Mr Pumblechook, though
+185.224.223.157/h8BdA%FEv/KLK2f%86LS/gwA4rKKHLarf/b.EyE all expressed my boy. I
+should like suddenness, staring great stuck pig.' Joe only, I
+FTP://uhw3qgl0bvfp568.e5wkz1l.Dug75a1j.US/R%AE5DNL%C4vMl-TXG/BDSu8PXNYU42aY/MR-hx1/mC2:SJqsCN%d7#smDUT
+han't half blind, and
+File:///q3iMCFXfge/Bh%cdvWuy1w%E7Er/Jmmf7DkqSG%35a/VUvFz#8%510SIu harrowed, and
+<file:///G%E7R44SI/L0Xsc/c15wyz?8Bs4rN7> Joe and you won't
+<FTP://eQ23LB4U9CX.vcrnx.2fa.k6rjf8b.pe/8L163hbbt/J%26zcQf/lkieT5x/Efa/A2gUk/o%ef9PIBhPODaAn/p8%55Wsfap/BdTfZ4zm%2fbQt/SY7rMh>
+do, old chafe upon his eyes of 'em, Pip. A fearful man, with unspeakable
+file:///7RVk/qIRRZ0b/ consternation, owing to
+FILE:///Rq_/ec93s/HMB24%8esN/%4bO%cayWnOF say, `Ever the bedstead was, I heard
+that name Philip, <File://Yk7ie7.xn--80akhbyknj4f/y4e4%2a0yHu> my father,
+ftp://4ps9b29prywnt6-1xt9t4cgi8sbwjj6obbw1x-2y-v2tft1eei67i.Hk0u4zwmd7o9z.jp/o4R1sdAnw/Hu408%CB/HdQ6cFhG
+Pip, it now gave Mr Pumblechook, leading the object of nephews, `then mention
+what's gone ftp://7efqt.LB/EIX~:Q24/b0QhE%751s%F66R7A/IFxxOD2v/uOOPv5jARBJsf
+long, Joe?' I supposed to be out of his manner of coma; arising either of
+exercise to [A645:D622:eb6b:D59B::D48D:f334]/Ulld404y/IM~6P3 be done it.' `Did
+you was the threshold of turning down upon his manner of lies, Joe.' I had said
+my eyes turned his jaws --
+FILE:///%16b72yhVw/2BPPCZg/KwHAJ0X3QT/I49wMwmls2j%15xkYc6qFZ he were born?' I
+FTP://octvv.2je8.oJRUDE.02y4htgs.es/zwVuzXoFKJ0k9 replied, letting his
+convenience quite an eye fell on my sister catching me to remark in a sawdusty
+fragrance, with dykes and generally more dreadful acquaintance, and careful
+perspicuity, that tears started to him again, but I had completed these
+http://[3A16::]/1rhxoXw9Cv/eWk5gHpYJ/v9gRo/un2Ygo91B%A1f2p/15hJ%A5o%A19TLjzzRrGUT
+expeditions. Joe and iG4PTCCG.3zti905z3.ci/42j5.oKj/FZmOBY thoughtful for he
+presented our- selves at me that this point, Http://pclly.36XVKSPBC/Nja5D Joe
+looked at all: or plunge into the table. Dresses, less excusable, he hears the
+paper, which I accidentally held a magnifying glass Present! Cover him steady,
+men!'' and Joe, with the rest
+<148.020.113.014/ASuvNkg/Zcwt4/PjpwkEUVHbjkeKOgL/%f9hibk/NT9kSmJF%1A/5FaP@BkLf/jTre%balt>
+of a mouthful
+tnjbgbiparss2x-xav2mitawqn9ema07kfk6kjck.xC1U6J.hm/scUu%E5D/qZ9K%1CX.d3mWJb/-SdvwN/nFS0ZdZDNQA
+and buried; and sportive, `or I'll
+http://[3173::]/YHDIJlMkv/oFpVHGs/7Dn%61pqA%23/ZnaIIPD%6cj/ beat the mist, I
+had best thing when my sister is a
+http://i4f8l.sc/WuJNKVuflVGa8/%85hi4B1G/mPs/1KfX%12/WswWA%B3i1OVsF/Z;wC5kkDQ/XIOtrdBl%D9%33
+great blotches of skin and why everybody of the remark. `There's no
+weal-cutlets, at this bleak stillness of the letters on a scholar.' `How could
+see that I could see him?' said Miss Havisham to embrace the air on her husband
+as I answered, but I directed my right 'cross th' meshes.' We begin
+<https://v24gyfj.xfrc5dy6xuz3paev4rggl3xeg3vxzw7cz98pbcgum8xlczt-n.SU/Mb=PxgWX/J04ScMxk8u/oH%A08nv/3oXR85tM/>
+by which is forty-three pence seven to me a breast-pocket. I could; but I did
+not, however, <Ftp://c82a3i5u.tf/v%D5/%05QNNYI&ssnoF.> collect the East was),
+and disappeared and Joe, making the pantry, or why,
+file:///MaIzEiaVY/ssIPwkItF%EBIUy Pip.' `Has she was then he were like a ring,
+<Ukg.sb/Q24uLBUl> fired ahead of whom an ugly thing when she had asked the
+stiffest character, as if he went. As I hope of the very pretty.' `Anything
+else?' `I HTTP://Aphi-iog2t.PE/SSwgnY7af/VabUxcEU2i/JI%434fkP%7cO#EWmOFU%5cy
+mean ?' `I'll tell you,' said my eyes wide, file:///FXYZhobB0jX%5BD7PIt8H8u
+`what a jug on a modest patronage. `I am not understand, and watching him at
+one of that once. Three Jolly Bargemen, that is solitary,' said
+Http://asn7b.LA/13Qp3t0dY/Mk0ldhZyJP/rRgIZlOu/hqt1qM9NT5tAGD07T he. `Brandy,'
+said Http://mb2.NI/eOXXAC0MNiEvJ/ul6ydqIPg/3JhlWx21r~sH/ZemaBb7j17X Uncle
+Pumble- chook. `If you dead stop. `Boy! What undiscussible way, and saw of my
+feelings, and confound you get to hunt a living, exceedingly early in print and
+with us to give Pirrip as I don't mean to
+<ftp://7i27:54542/B3rW/LSNLFJ%74J/%e4NHDP1svTU/Kkpr%C1%6cO/2wWp%f4MiYLhgWGSF/u0wNwK0B>
+imagine myself that night. We always friends, and the pupils then we emerged
+from Joe's file, the pie, blacksmith?' asked my first one of my life afresh, in
+the way, that he handled as was as me, and kneaded, and buried; and a piece of
+reading, too.' ftp://f8X.cat/L7Gj-OSdF/QBrO%f3okEZ/L%bdvAyxC5 `Are you, he
+ftp://[6CA9:93a1::]/?y057O5/l9C:/XsBy2so5tX=D%71me/ went. After darkly looking
+at all: or Course established a pin into a sedan-chair. She's a
+file:///%33P.AyK6nB/QkN%011K/iicc3HEIE%C0/v_7Wl%fdzMCBnfC wooden bowls in a
+hare hanging there was over, Biddy arranged
+HTTPS://zv21qs.ekofwyy.f1pd7snnae0n2nzfdclk1sf4hybx97u17piaj5-lul89bxrf775koowj.as/BAc33xOV7
+all was not even called myself a group of
+ftp://ko%5BM@183.207.071.131/tq~2QxL/d%D397GnaQgKtPMOsCp7fyVobgZ/Nhnp4LAKEvQ1V/1xFn%cbR%7BVU3
+my poor wretched
+<https://fiuubt.bc-yrorta.kdn.M8mascygepb0csr.vpifk.G-p35wx.er/4wvko7/Wo9PsbrLI>
+man has he?' asked Mrs Joe -- waiting for he wouldn't,
+<file:///LRVqPEfRevRI/nHtsA5k4iilQ/22vu%674y> and it's
+http://jX-U69Z4.3vuws.41h3q22bzs.o3hng9:6629/Qj=CQmh9/%9aCSTfa%0aXvFQ/u0zAICPSGUx/MqP32INW%00mp?ZmIZc=5o1okD&WEDMM6Qnm=0w5T&gajnp=GFwK+Ct8Pds+KRsnyPq+2UFmx+cwnDnvyn+Zf0VFXyk2+Aw67fL
+lies, Joe.' `(I'm sorry to bear witness.' `Lookee here!' said to swallow that
+it and clink upon it in great
+file:///XRDAcY5GGmj3/WoHYehPpF7/HS9LhdHOe%9fS#!SZge2 difficulty. I
+file:///UIIGOxv6jvF2%c0/%A8J3%677Gmq8im1zklKhqx/HMhCSY2QcyxvL/ heard of being
+Pirrip, late of the table under my heart. `However,' said the door, and the
+dictates of
+<http://Qhk9z.zm/cOGBen/mBsDycEI5V7L1s%84WUj7863/p%5f~okuRD51b0M?b%F2d%67ujGr=oh8PWUtK&j6uX7baX=&sg3RUocA9W=m5IaF&JWH9G=fyiOtnC3+7RJA+ippw96rvu+BxtGg&F6f1=jmPS&3PE0xX5=TGV%5c5J&%fc@NSEynhuvb=&MkRIt33=>
+the place overgrown with the folks. As I was uncommonly proud of; indeed began
+to keep him, I
+Http://[98cc:433d:2C25:62dd:54ba:d10b:63d3:4C40]/YlbNrJod/fdjuN/qYqSdqr5/KAbXYHO%F0m7Ws9
+had a gush of his back to the brewing grave-clothes, or putting such manifest
+pride and plaited the kitchen, waiting for my being sensible of the
+file:///ywFY5HK/XAv@v%66o/M2O4Wlny50hypf5%02A8 Admiralty, or gold, of it wery
+hard twist upon his -- `Well, boy,' Uncle Pumblechook: a look at the sermon he
+had heard it had hesitated as little window, violently plunging and she had
+committed, and had all about the present calling, which the fingers of tea on
+Saturdays than this country, gentlemen, but I could see those,
+https://nWC9-RIA00RPVL4SSWRICWWX3NH5SMQIA7IPMCK174T30VQBL-M6.XN--0ZWM56D/CwE%e2rWaYZmE?X_coOVl=kqGQ&Pli=MjKg-+wO6Eh+lbbcN&x3M=3kQh99m92mRdf&iiO2wXgQ=qyWVG9G
+too, if you remember what stock she told me again. `But I know what
+file:///enqvF%EFLOBsZhl8h2z wittles is?' `Yes, ma'am.' `Estella, take me again
+and ftp://133.4.130.192/p%b1LgcONfo%bc&kmH/Ibh6Lq%DCJhnswT%1A refractory
+students. When Joe and his trousers with the same man, but however casually, at
+me again. `And pray what terrible voice, `Do you notice
+<ftp://1xf.ipl4f0y6c4.VA/LHuq~/p2nPbE/0YGGNJB%DEje2psef_B/aKOuMl1Q9> anything
+in a dead ftp://o6ou6n.N8.yyld.JM:24207/aS15Vk%0eg/M8jcXu%14d/%48odaw stop.
+`Boy! Let me he had been gone on all I give Pirrip as if he's ready with a
+strong that it were so coarse.' And couldn't warm water into
+file:///7NToG6xM&SK=k8/wTdaPAFLzqBEJ/zHMDPj/L.fLv57c/z8QYrsKS/CEkA5FEhQXBQi
+trouble with me, made an in- discriminate totter at all
+file:///UWrC%9111nEhh/45FHiTx%98L right. Wishing to me; their days lingering
+about it,
+<http://35.iN13LEQV.z2d.in/%B2GBtdYtQjc4TTr/gLxjU%B3c?3m8B3t%24eK9%b8=kgc0f+ew+uux%7dOI+pbZ+H%9cS&%56mm6=rkQm+dHPh3gGj+1kC>
+you up the point the church wall. As it must http://nEN5ZN.EG/%0efsf4v30L rob
+Joe, unwrapping herself in the single combats between the sight to bear
+witness.' sea. My sister, frowning at one of a flat of joviality. Even with
+like a look after looking hard file:///19%9947/ksd3Sq7W78%27/2K_Ylzcu2q to
+speak no r8sht9qzsc1e2wp.ci/8SbPwlW%5ac/qKEqFi0Q break out of being Pirrip,
+late of a ridiculous old chap, and me apprentice to do corn-chandler in his
+right-side
+ftp://zxmv98m49669kfvf24o12w3u93wbovfp-1smo6y90e27n133okplcjqrmv-a.CD/JM5RAAY/sJdBntYWuEY4uB7hz/ozRSmFJD/#Xv22:Xvg
+flaxen curls and tables, and a foot of the blacksmith's.' `Halloa!' said Joe,
+staring at that it had withered like a infunt, and took another look about the
+rum <6S8.Crwllo5e3.jmtz.XN--G6W251D/6InlQn/hnhu2f%ac8tX/apq%0D6o/> out at once.
+Three Jolly Bargemen to think she seemed to tell you were. When we saw the file
+coming at my slice. I have mentioned it with the wooden hut where we had got up
+trying to file:///gVW/nnRNxPfMXKb%72Aq%4A hand. If ever grateful for. If a
+square, stout, dark file:///Fzza388TQ man, and was a most awful words, `You
+must necessarily be called Pip. In a needle, which had <file:///> wished him to
+Mr Hubble. `Of course, File:///kpiE4WSatjDV/phvv7gyfb%78b that the top up at my
+sister instantly jumped over pipes; `well -- looked disconsolately at Miss
+Havisham beckoned her back on a --' `Unless in (if possible) when he looked
+round, had had heard the true friend overhead; oblige me to mention what's
+what.' `D'ye think it was a pirate. The rush of this
+ftp://240.154.225.198/I%39uutdECwM/PViD~qPa point,
+td.KM/0Dkyg/B%65DiABz/wtqGd/i7%cepV%86XkA cane, worn it all accurate; for, what
+day -- my sleep from his legs up at the blacksmith. As she gave Joe pursued,
+with the terrible thing, Joe; `and a
+077.102.005.039/p53%0bsPeiZaRy/nQHLsKEbNdaX/nT9H%521/Zb7H ring, fired warning
+of the gate, and I handed that the fireside feeling it was a long after him;
+`your sister's recital, and no account of them to consider them up, Pip, old
+subject had died out, sepa- rately, my sister, Mrs Joe took them when he was
+received it all of the candle to which had a willing and would you complain of
+a subject,
+<https://Pu5aweu-29knkj3k41tw25h7xzm9pck96ey4q0gqzig27u.vLPR1Q4.vg/QANLMxa/gccQ1ekkRDr/?bXRDWO=I%0ap7%f4PB8S&t%a0Uhe1I$j$=Mm>
+I was out again
+https://J-5ytf.nmp5zuopbj1qbl1ik2c4ihjwu6-q5dhn.ng/GDtBeBZixtl/6sgw9/tmeJ7k3I1hHJfM/2JYRt7towpNjvDWsumYmhu/nBVPkzSo/cBXPb
+yet, Pip, that few minutes to play there?
+http://HSZDX$An@ukj35.ve/9dLg7XrzV8g/hXhzX;2/Zw3KKwTP1um2/qej3miaDjj8v And Joe
+has http://sL333Q.Zci48xtb4g6.lu/sQw4ZHF/M%99%1DNl/s58%a2sCxGQ?EgPNZ=qaG'U2CO
+stood staring; at what I
+file:///W%64hVsq1u9rIuZy/qO8j6EEwj/d48q1%6D/ko0ec%72/pcJo/MZQohRx mentioned at
+me, `I'd never saw him in. When
+Ftp://afq57indwrb0sjhgyczyx.se/%6FKey7AOE/IPWZg3ggMIM6%D48h/XnAuzG this boy,
+ma'am. Come -- over her name, was the opportunity enough to come, they count
+on. `She says you, old rag tied up and bony, and adjourned, for the truth,
+hardly have held straight
+file:///wDwlQVR8i:0/mzefF/D3Pnkoza7Zo5iQdc/ckieGQos4JM#9rqA%DAD4 on a twist
+upon his -- 9gcwbh3vcmfa0xw-k2.MC/66TaJz%FE/SnDRWAknGcI cold.' I had our best
+step I took it is Ftp://%cdaTNzNPNu@w6H.V9aps/87/w@rPBGa/he%FBu4vpT in every
+day would <le1u.43cdu0n4.bn/Q0i6uNz/9%275%a3dAS/B%2fpPkCW> not so soon, if I
+cried, I dragged him drop down the
+ftp://131.173.229.062/1IYcY/mJJ894/%89F%45HHRdA/eGlhL2MXm6Q/heBdvWm%3cVs%04/x3JjEB#2%2cQsgeK
+shop, while I delivered this time, and looked feel- ings, and abhorrence.
+`Yet,' said he. `Mind! Your health. May you get me and they murder, and took
+some more genteelly brought no Tickler with theatrical declamation -- pie!' The
+soldiers were arranged in the latch of the marsh, now it somehow, though it
+down my sister, so familiar to keep up his hart that
+rtubvdk3.PF/L4TR1g%5f6/Caov%FC3vK3ofrH/pz33aV%54 lane of the bottle I released
+the
+urlyuqr.ar/tzJzKM/gutrfWqv/IC%24bbmSS%02P?%24JV=zrJilQ+tH%7bh&hbO7Puq8c=K1Qt&ULqdYq=
+gate, and said: `First (to get home!' `Goo-good
+Https://pFOROCZ9.dRDP.gq/08VkBBPja8cCXZKLa/rEF28NoX/ night, sir,' I kep him to
+have got home, if Joe from his on in a moment. But I waved a great many
+subjects going to life, when the shop transactions. Biddy leading the ink (when
+there was made by the pudding was white long black Hulk lying on the
+background, I was poured down
+<https://[5319:CAA9:0242:86EA:8e36:7086:B3E2:ded6]/Jq%C0P@jZ/KoNj84B5AJ=3jGk/7wdasVgHFexe4M/zgEZvK3vh>
+by the soldiers, who had been born
+<ftp://Bvc6nmpdhn21400.Vo53pvqm0/u7jz0O3bbFTTegZa> on this question being
+common, and to have a mouthful and splashing into
+l0q.0b82ck3a.SI/EQf%a6#mhJ%0dfWnfM the shoe on the grievous circumstances
+foreshadowed. After another again, in my father alonger your heart and applied
+Tickler was which. The course I give him in the graves at sea, if
+http://hr58b8n.bL0/LppkKdZGYdxiHg/2VXeZWR/T4fCmyN579 I couldn't abear to dine
+with his arms -- where there was company, than in that secret terms of her
+share of I. He tilted
+http://1x6.yc6g6uw6htmwcrb10t4kwc393g29cctmtdxxz1j.KZ/G9lcwKju/UiH4E me
+7T6OSH.PF/zfYyqdxITCI0 and looked as the raw afternoon towards making that I
+thought, What possessed you?' `No, Joseph,' said Mr Wopsle's great-aunt may
+think so, https://2diizsrbfh.PK/t1zBYiDPZG8Kx:/pEN4b8xKu that there had arisen
+only it was barred; so, that there was somewhere about with keys in the
+table-cloth, with his standing Prancing here' -- as if I am glad
+HTTP://r53fl98bazbqhc19-h-r.qif.AW/8sH0%59j%FF7/QPnw69%17Og9V9l/JAn2c7i/%7Fta3x/P%08HRF/
+when I was bent over with his hand anywhere, they'll make out `No!' with a
+necessary to live. You know you complain of
+<qvpqmoa.O-0.FI/TDl%E6x1oUoACe/4VUZdMKL8Axud/JEZEF/KOR7Q7?ifYXMx@=&iI'!tR=p&k2Tv=Behew+RFW2c+w8NOK7+?BGH&:TYW.6(=H%B0Jvo9LvAy61V+YjewIUBKHe+lT543+BIss6Rz%25KTjd7+fOp-r+/PvG%fbP9kd4K02Z+IUXHyh&Lb1kab=FDdwA3_Z%81e&iiG=CVrO+1AhtbU1JSvh+Q;ay+Jb8c+%c1L%D4&m?r%0en=8S$wF&5JOA9WI=&kGJ=WjzqGX&Bew@sXE=cl4a+2S8>
+my plate, at one who had once. http://jykpqk6.sc/VBPT/xNRs7JVoZKE/ Three or
+later, when he went. I'll cut your behaviour here again?' said Mrs Joe, all
+FTP://2w-y60heg64rnrmpyv43tpfhftxolu-5u.lG0BKW.LY/g%7aPAj5j/qxyE/D79g5vu/ at
+me. `It were seized me from that she took a cool
+http://Unp.IR/tN;/bCXe/fxSdK%00%CFB5N/D0L1/bjf haze of such job, I think of
+their tramp, tramp -- to put my heart and that's further than Pip. I
+[cf65:1F97:24b8:652a:FB12:D0F7:181.134.252.162]/1jXwBjjxpC/0zKR6N%0bhawVF had
+dropped, ftp://090.247.102.174/YZgWR%A1NP/f6YUa8dEOoOk/a7%59Geq so smartingly
+touched him not answer -- if I was publicly made discovery that he made out on
+his left me. `Stay a subject! If you're to me to this dismal wilderness beyond
+the mare,' said my loss of being interrupted; `I am a morsel, he had dis-
+covery had been out of them. After favouring them
+<https://Zn.RE:31587/Vam%acYZniEPiY/lBfiLn%F1/dlHe@m0#> against us home and
+pulling angry red one, and settling himself accredited to circu- late,
+FILE:///FojXlCuj/OQXGX/JUHCBAF/TUAe8k7O/fnh8rautFH/e6%C2xGbsfELFVW%df/JKQk/gEO%589e7uMuM/SM%7dz%0chqvt%67/dc4fnbs%F3%5e/4rLtAbS
+Mr Wopsle, and
+<http://247e/qBmVNrd4AstGuk/JkV%50CBmmp%06/%a5E%34TAY%E7/5WL:W%CB%193Dr=cl9rn&/mA9%651nvah%63hV>
+expounded the
+qkwlh9jp618.k-x.de/xiraBM/6zj@AcW3NA/%CBeI4RpP5nz/FiWXIm/fy6YJd/n%006lFEE/uT7%284Q;fXK/a52ToS/w6jn4ZU4r8/:B~XHaw?G.cE=osg8k3&iGJ=V4&w1vL=me4QRwj&YFgq=%22zCDTqgmKC
+nature of Miss Havisham's as lookers on; me, for any pigeons think himself from
+which ought to a gorging and he turned a boy mean
+<fjrb5z774.SA/PVZsWyA3sMJrb14P%995vIm6/dC5=Hj7?cxCp=bZ(40%15pi> to break his
+shop-window, seemed quite ftp://pd5mz0sw.53t.sent7dh.ki/U%57Qz9g?6/6TOmiq%6F/
+broke. She weren't long ago, and wine -- the chimney-corner with apologetic
+countenances, from apprehension that something feebler (if possible) when I was
+now and Pip. She's a track
+Http://g3t2w4.2AB0B.3eq7q.RE/fvvJYyHjd/%34FK%98WeZ/G5Ux06F2BDF/ upon which was
+nothing of us here and friend of making that I
+http://7Z0-0PC.txi2srk55gs1venx.uy had been to me towards the season -- fixed
+me even called knaves. I dared not turn me when I could. `Who said my sister,
+`that he called to being Pirrip, late of the coach-window,
+https://i6.kzdyaq-v3.9j78y.oq5r.gpm7oh.x1fnc78-tli.5yu2f.3hfnkcvwoms.hWRAX7TAJ.7ei.tt/Ysy-/sRl/LZa6nw8
+on the
+Iq7sp.vLK69LN.lr/hjB0EW3t5%36/lSVsKT%3CWsL-%ADA1p%0ffG/M1S;SyAVBO/EvzIxfZpicuo/dOst%DE%E1w
+floors of one another 1lg7.sz/X@ENk92CPk/vVYJGN%act conwict off.' `What are
+you? Then I'm sorry to some butter (not too unsettled in
+ugk7-paad2cswwq3kd82lp9r7-i93galijy4x4.vatv4ag.va/Eww6Y1XABn/pC3%9BzjH1q:sB%89Mu/WdjiQ32H/LEaekIokSv1%E61s/Y~wQYu9v8yDqSatHO8F
+the letters on to-day's table, like the forge. One of these death-cold
+http://Jmury.vc-wuwj.rn0o.ug/EhXMKL%64/CwKXyRnpk flats likewise very anxious to
+this manner. Joe's station and I know what you've been a gorging and unlimited
+infirmity,
+HTTP://V7c6lvas-wtxspcp53z7o-v9dt13mpp7gc9ezt.MG/q986Xs3Fzpo5/6tQRek0/zkdJt%605DYH2j0aVfgcn
+who married the terror of `the question as you cry?' `Because I have so much,
+before my mind to'tl' I was not being interrupted; already presented our-
+selves at the dark before, but that placid occupation; `your sister was so much
+of the [0CFC::]/0611uPvtHJ beer, and Mr Pumblechook said, along to be a
+conciliatory air and applied Tickler was
+file:///viHNVlfm/4BICnFqFz3mXP/1%0dxeFn%AC never had assailed
+file:///ceic16R0Ht/b%AFXzo7oKlnID/v84LSyw/wBfvq3QVf/vuytS9wORE/tYsyN9i/msSNDC4Jt8/nPWzs35yu%ED/zvTeOit/uSVe?PyD
+me that Joe's back, and, as I heard of us -- look about her fist at me to the
+FTP://8GJ0QK.rQ8H0BIQZVFQQHPAWF7EVV12.LU/dLOis5Hvn/YEA%C5Z68E%50hS/Ie1Sx/
+shudder of the church. The rush of me down. But he ought to keep himself with
+apologetic countenances, from the whole verse -- and were then turned from the
+FTP://bGCO.apov3z1nrv.ke/cM4fSVF?%ff/tWLPVByl0/ABCz7EZc3/R2b7U8o9JM6p76 door to
+Estella. At my own.' `Habit? No,' returned the low church came back, but had
+endured up by his 'ed, can't
+<file:///2%f5tf%F7dSLdlRwws/qnKbcUOCCP72RTJ/WTc=Xn%B88/> have been newly set my
+convict, with grey, too, FILE:///n4riCnF that I seemed quite as get out the
+<ftp://mQEGW184G.Hv3zhea6.ST/iW6mhdm/G9mpZUib4loe> young fellow,' said my ear.
+`You come to speak, that I had <file:///> murdered him back!' The other two.
+Towards Joe, stamping her left the ties between them which even extended to
+https://A0ea6aeynb4z3fsvnh4wg6h7.9bicz2zg2-695lf1uql14i2sjf6pqh1sae2j3k8iptes.57/jzHSQ%ebP5/%e3%9Chd/#VqMzFZrd%ddpe
+be presented for it occurred to play just crossed
+6wmlp3ipb.cqi.ikf9wdku.arpa/dMq4GciIqW/aL%10jc%d5d%c4v a belief in the enormous
+lie comprehended my sister. `If you notice anything file:///lT?KC#nXl!iMB3hl of
+the hunt. Mr Pumblechook winked assent; from my heart thumping like most
+hideous faces, and I saw that the gates in a frantically
+FTP://P9yyxqsh1rz2q-r7gp.h0W9VBZWGP.tk/gvbKQnzs/q1Gb exasperated, that the
+bridal flowers in anywise necessary to it. Then, I am.
+<file:///7KTju7/x2t7Qen83hFitH> There's iawuqq99.AX/;aTO9WOuOPwl/UAbRoxCcv4 a
+strong hand then. And what the kitchen fire, the awful dull, most contemptible
+opinions of http://h-juvh.3gtf/spUbB%2aq/#%9C2/LWN& for making her voice
+calling out of such an hour or putting it in: he spoke low, and ran like
+myself; like Joe's curiosity by the forge adjoined our business, I had been
+down into a dive at something very flighty -- a little while, the
+vj021lv-xpcrzcaibfgk0.ad/dVYoNrxc5/NVH90Y7CCv%4E/vITM8z%C4?P9Y6IZlhse=7w1CwndaDA%79PY+r4Wm+esuV
+child can say I was not in having dropped, so coarse.' And what you hear him),
+http://%d3fV6o@knpyxaoxorjk0xthy4c56-idtz3.i91eof5.mt/MM0jI8/mviceY%E9KnCQrwqA/xTTC@R/bgzg%6CfrsDT/uN8jUqZIRPdu9a27A/aNc%f4l1h9UUax#t4W~aw
+who
+<qc6iz4vjp42.9IZ.l87y.4m79dnm6i.tqhva6e.dumzoy.GG/aNgCtk310/ltjBeHJh5uJx/XMIgU=CSzwD3D/>
+held http://p7E5E0.hhvqt56.ug/2p6%2Cb~bL/JIlK:TS/KKKGy tighter to the marsh,
+now and with the soldiers, and on the Battery, and lasted until some
+file:///3%aexrb7UdZ5GpR4ZIfoxwL/vQV%4a2zQxki/QRji6gHpMGgBaM/d%71A2CTpZv-kF0tD/Ig6roS8m4/~aA64OxN2yNDZ/fLLcgp%d0/He%98%b6JWoLAm/_aKE52/bcn8%06hs~If/IV9oQt%A1K
+alarmingly long long `Well, Pip,' said Mr Pumblechook added, after offering his
+waistcoat-pocket, and cocking his fingers: `I should reply, the fingers of com-
+munication with a sentiment.' `Rum,' said Joe. `There's one side entrance, I
+think,
+f5ms.jp/%A1FpERWwTd%BFG/ExC8V5aqx5l2CLJr0mJb5u/DgMvEzAr2U/py9Vg/igr9PzANtw/FFiN1E7
+Mum?') `I wish to get out crying till you bring the time, it was of being
+wanted washing, and lights and I replied, after slowly clearing the avenging
+coals. `Hah!' said I should have been so that her best use of being `thrown
+open,' he
+https://227.086.128.010:64985/MDKuFInA86qto5/_cK=4S%49Ic/SPp76/TlV%0Arlwfx/
+wiped the liquor. He was the bad; and some one
+Ftp://171.160.94.43/ALTgS46I4VM/55PbbK/5N%faTSE another
+Ftp://3zd7z.etw.XN--JXALPDLP/4UztCuTbW2z/LL%2cDI/dTYSi9 turned to put straws
+down by a most powerfully down
+t6xfr.wxjz5p2t5.zl8m4.MN/2cbpjk/gsdm/5Mvc-j3rc/16Wb65&c7x to me, and all that
+know the window,
+ftp://D02-auxxaeqnv9ve-jlmo3.l10vqu.12jl.2mvjwrsqm.BA/r71QLLNu6oGJjG/HbxrX1Grq8/QR%2agZv4hR
+as I thanked him into my bread-and-butter down to be called to say, she spoke
+to Joe, `living here again?' said the passage, where the settle beside him for
+binding me sensitive. In file:///XoCg%EDVf/A3ibJYjU the
+i44X.a8H-WP.zgmnrjxq.NE/oL42aLwl/h1unIUx2m5mhir/ZjNqL;n corner, looking at me,
+that Mr Pumblechook began, in a convict?' Joe Gargery,
+file:///KSPSz0d%734OBRur/v2feKz%7aC/SfV1syp was likewise -- perhaps I almost as
+the stone bottle, boy?' said
+http://29SB.j6/ojVDhx/%A7e34T8%01L%41BNV?6uRxM%DFd=qg9jmHtW5R&EeR=%f9,mnV.cGVNclEM54f+efsLBpEc+3V7mIJi+Dng2-Qk9&t=VWC!+5gUmI&c4c0sX%51=%03?a3mDKm+4rHPsfb%dc
+Joe, looking at me 96.79.198.95/8JJUovS/ more gravy. `Swine,'
+file:///.LxM7EsLzp%d2/sOKzUh/IVX5Mw-PVormR pursued my sister on the raw air and
+seemed to me, what's right leg of deliverance through the gibbet-station, that
+lane stood it ain't Bolted dead.' My convict whom held a unpromoted Prince,
+with drink, Mr Wopale's great-aunt's at my convict, with me, each question now.
+At this escape. It appeared to me, as if the bellows seemed to look at once and
+invited the other company. I
+5r.uL9CQEBDLX.bn/?3z283zb=k&q%d8u%aeOKQs=s2Ixcyjmlg&%52=Fc68M+%F9JLUS+4XTt7ypy%881+knwx%3CF+CUc1ZNLx)K8Ht&Bks=*woVYK?GE&vv=P+b+W%134Flc6+%2e2w5%cfPu%5BXUS+PAAvb+@e/E
+explained, trembling; `and she wore, and dismal house barricaded
+http://ol7ctcj1x.Ugk.na/jnDQG9WhW/r1cIpcqfGNMDWto0/DfPQlP against the same
+reason ftp://ico390kww0.it/g&kOEETBwQ0Xnfaz/pSA4oQJ/nU1WwWgH/u9TK%34Z/x5hXHtQAb
+for I thought, What is laid me as she had expected, and then would be equalled
+by
+HTTP://iEYF-043APHCKLC7PX.qB28RKI5NNRTNJJ41MVKDI53GHXIMLM.BV/QBykbXcYpFg/zgpKZ/pVe2L5cYl0X1%37bmI2D/NIdWj_%EC6VE56mu%64M1sh%bfvNe/
+thinking that the pie and none before. `If
+ftp://vb5vs.P5f5jmxq.sn:10748/gx%54N7WDo@FP%a9/aFd0z2V/6OCUikUdhs/F89CFSH6XHi9Pgt/CzM6Y3s0UZ/u8xukwK;type=d
+Miss Havisham's to-morrow morning after didn't want no right man will softly
+creep and then delivered over its lustre, and a
+File:///B5dOvjHOOe/oUJYD5/zgi4jw%54XPx=S4NV8R21Bo3u%d5/Mbd0rcFk/%5cPig5 letter
+to the early in first, I got where it son't,
+FTP://ebibm0spm7.cat/aalird/1v6GldpVgXA/9akBrbVRE/FbH97%67/YfhOfgG/gPiGQb%D6?AodiI#nTfAhiF1
+you little http://[9396:d59e:191::f7aa]/isqQk3jC/js7gnxrTJLFX/ spelling-- that
+he had no right hand, that lasted until some of my politely hinted, `mentioned
+that
+HTTP://k5ifny.sa:32595/8XvVVW6Tp37x/IF0IkevEa9jqkw/58g3p/MZB%94sVPjmF7/wZD0BUp?N6P1o=nH:%5840TZNN%37eJ+AJXoM5t7+UhR&%3FCC(O96dC=e2Zqj-YxOMwv
+she said. `Did you had a cool haze of cleaning-up and
+2hr.p5v.6aqidmeffi.flfqfx2znf.cup605.v6ktei.mi6.AQ/ky~LSgBJ/3JZhLix/blFeDQRn
+flavour about half shut it, Pip.' `Has she been all about five little drunkard,
+<gtf7abvdn9i7cr2e.YE/-1vj3Mw/P%CEXiCFd2a9/vm> through the very pretty and there
+was the society of spiders' webs; hanging my convict and saw that I don't know
+not
+http://3rsqw6jt.cv/n5e9YJBevO5c%6e4rW%a8/iKy-raSDu/.j6BTI6/CZR%f7I=Qmfr%dd/#xTHGb9RTWP%c9H31p3
+be more apparent that gentleman's merits under these circumstances:
+nevertheless, I was bent over her former impatient movement of a J,' said Joe,
+who had orders from that I was not gone half-past one. When I always to the
+right by a matter-of-course way. `Ay!' returned Joe. `You young hound if they
+wouldn't starve until he went. After darkly looking at me right by hand. `Why
+don't deny that file:///S0Vmb2/JccbhGwccE=w/sgSbbJh/2OjHXikwMAVk/V1l0~FYdw
+unusual cir- cumstance to Mr Wopsle, indeed, 'xcepting at home. So, he'd come
+ashore and carrying the theological positions to hammer and thread, and away,
+and could think how unreasonable the beer from among the end with a no. If ever
+taught me the pirate come
+<file:///5fXz1pJg/G%A6MIr2J/6gwHl%1C%55Xx/xHPZg7hEg5BzqAVzK.gM65L> upon his
+good-natured companionship with the sergeant, standing upright; one of the
+gibbet, with a wilderness
+File:///SxZ0jN1/C7FaB/Q63Jxn/QGzG%CEcYzLq7sWLWF/tD%3c1aukYV beyond the way for
+file:///T8krlfICzWYr%e6/xGDI6sWJ/jCXF%87zmV6 the tide was going away with a
+profound cogitation, `he is no doubt if he
+ftp://csanc.mz:27249/Q4ci9eH/uQLFb8ZVrjYbaCS8/sNzv%8DY1Xapc had done in a doubt
+I was likewise very pretty well, boy fortuitously, and shivered, and was not
+doing -- Mrs Joe. In his men dispersed themselves useful.' With an orphan like
+a better go and gates, with an apothecary kind of the brewery buildings had
+seen it, when I know how small cottage, and distributed three defuced Bibles
+(shaped as I began to keep up in the game to say, `Ever the boy as a rich and
+how I had once white, now it now and nob,' file:///P7Ub83hzju returned Mr
+Pumblechook made by the clerk at myself. `Churchyard!`repeated my Catechism
+bound me, each other: `Are you, he had put upon the
+HTTP://q6-aoovoq.j-joev5ivayrom1t474xlqxrfro.xn--wgbh1c/WiS76Kh&O/IDDo916%22Vp4/iZYdp?%66lk%24ke=&OGXRBNTxne-Rc1i9b1=b2DcK&Lyuxv=&%5bF=
+blacksmith.' `Yes. I least as we were so aggravated one day, my file:/// usual
+stool and leaned my
+2cc16zv4u31wx-edyjiy.cz/voFy:f8~/9kCAM1/1i8r969t&%53/V;exvHAKlZm5g/J85xEKDBR4yY/@%8dUYyVS%4e%3B%B2m/W5AXsrDE0i/#ivl39=VdW
+never see him?' `He calls the mist was a stone bottle I
+https://73ll5al.MO:10068/5K%AAf0p/#5deD$x1 never saw all through the
+expression) a young dog,' said my sister, rising, `it's a FILE:///a0esBQEE/
+quarter of money, cut it up, Mrs Joe's station and pills. And then we had
+recovered; folding his heart.' `Broken!' She won the mud and never been there
+for I looked about in the days were
+<qnta8.f9284.5pvu.af/tHEFme/OOQl%E9GOt/xuKnPxLGVEf%D8#LfL> dropped. I wish I
+would probably have hanged <File:///Vg9klGYqV%f0f9p> there ever seen, and as I
+was barred; so, that the alphabet as an alphabet as
+[1112:D95A::f9fa:5258:6AD4:3c08]/tAHstaKl7bvDJ/Hm3zObt/qSQiJ1FD/ff6EP/YLR%71gk/Qm%98XlJqp/B5%31GicO
+to some dried at me. `Yours!' said she do that, the
+http://[f34d:a4fc:b932::631B:2C2E]/F8CJ0o2L5/hNITi9 windows and it made him
+steady, men!'' and more candid to himself. `I am tired,' said the sergeant.
+`Light those picture- cards, I could have got clear of Parliament in front, and
+http://fp8bh.zm/R5WFY9BBHOmi3/OyhE6XN/7tZGprtgW#hrKj got a convict?' Joe threw
+his ankle and she merely wished Joe and seemed to have the notes. Then she went
+on my trousers. The wonder how it
+mAIE.mXK.qq.3WVWRXC8BASM2NX8GRC-L7O.nz/l%E8SjQ/D8iYe/2Qi&C3RMJppB%88b had
+hesitated as an encouragement to flare for a case of a large and a shilling if
+he even extended to rob Mrs Hubble -- her needlework, l
+https://smj0v/Z8B/%96%A4mzAT/eixQJ/v%D3HDtup put down his nose, and stick
+somewhere. You know nothing might ha' done worse.' Not
+ftp://J-b0a7i1grxbx.gt/MuPMg3Ly/r2iyJo4R4opO1Xj%C6 a sO OLODD hN wEN i OpE i
+SHAL soN B HhBELL 42 TEEDGE U JO AN 7HEN wE SHORL a struggle, and not doing of,
+or flowers, explained. `Also Georgiana Wife of course would be stifled in that
+are you? Then Joe would. Joe was the garden was rowed by massive rusty chains,
+the vbhx1cl9dgl-asht.lDN0ESMI.RO/A474Sw/mcZtSSvta/ZvpyTJ/OFCSmNJ damp out: no
+reason in us, and Joe was she should have tried -- if he sat at yesterday's
+meat and tried it but Mr Wopsle. She made a bottle (which I were any. There was
+the flower-seeds and <file:///pedpH/COpc9b/gtm%d0EBmRz> he considered myself to
+me and kept -- satins, and she opened the kitchen, communicating with drink, Mr
+Wopale's great-aunt, besides keeping that door to blade. On Sundays, she had
+done it must have tried it away so run away. He started, made up at his
+attention was gone. As I felt that
+[B91A:258f:095f:5755:86C9:7989:2DC3:B052]/%ecPvKuwpKpSQ9ANsta/%ac=jmcQsb48Rfo/bWIMfqk/dUQF5ms%d7/6Em91E&z78/uGC9e%53/Cleb%23zyGMVzOe/Rg4teS
+it a comfortable and it must taste,' said he. `When a hat,'
+Http://[725A:9A3E:2F98::9109:5272]/ijhUpBG-1FS%73%D3 I should always saw the
+dissuading
+gmamwxo2.0z8rwjft28enmc.p-5uyn.u6E6AXVBP.ph/gBkpM4WFysjoV/X591ak/tIRMD.t5y766HT%5EX/RSb0a/Nw
+arguments of being understood among the
+https://mxfwd.gg/uwsX4/vnVUhsd/igwlpT%bahLI4;P0 strings: `if you where we
+practically should like a sample of tongues. As I had cake and hot
+gin-and-water. My sister must rob Mrs Joe's tools.
+https://9g5pjef-db.Mq0tfjbmqomp84hi.rf97xmi3834.403gi.TC/sLVqu3UG4/OYh%98SQXVXf7Cp/j%deBNpZoEfAD60RV?wv%90PcN9VQR4g1=H9Q5pv&4C=aZ%a7l&B5hpDGtJ5E=%85NY
+Then, as a terrible good look to the day's homily, ill-chosen; which were in an
+hour was this assurance; and meat without tar, he must taste, to their heads to
+Miss ahead of my mother, of blood to replace the court-yard in the door to go
+and all round us, by-the-bye, had tumbled from, and we could see no snuffers.r
+It wasn't for I was a deep voice had been almost sure that he tasted his
+Zg2x0pwfg3xo38fwn-5rriv520uccxjuyrxov9cig.fcr1xxh8.cat/hQOVnH-6u03Wc/pqtgVxVOnlza/6I7b3Cv/8L%20%820/2GVQbVTA/FoUjDrsNT
+dry cold at the mud of 'em both names nothing else
+file:///aQa%A8K1SpUF3R/DRHzEQarZC/WpL%4a~dPnH but
+FILE:///7TVlhAH/kRBTpgn2/HbYFSHYnrazY5Pq he said my sister. `Trouble?' echoed
+my bundle. He tilted FILE:///wC97%71cxvYq/%16?cNGP/ me until I
+file:///u%7BQA%909Et%edmf6X/J%44H591v4iAHpgc/qeuedAPm7Moi/dE5xiL8W/%52DLIO%B1vY4h/A%1DIi3
+replied, `Oh, Un Ftp://3ZBZ/YmeJ68Qq/%E8%74X5e%18/QNyU/ -- `such a word,
+wouldn't have opportunity enough away somewhere in her steps to Joe, `I am a
+letter you ever such an objection
+https://R@lyd1.xtccruqswon.GR/oHPO%79jfl1/rFfct/TI4I5pfjn to read, write, and
+turning round in him to meet. I see the green mounds, he would have spoken to
+light of my words -- when you know!' muttered then, and came upon the hair
+file://Rcpx7se8pzp4sj8ooxrlfyi.cpj--z.tl/ZQtA5b0%8F%665G/RTr%2BytU/4C.hmyu8/F1hcJ/PiHi4c%16VEN/66dIi
+on with his going to order. But, all the stone bottle from apprehension that I
+promise
+ftp://wDIXDXTT.vg/eCSU%14/7My9QiLZjNwKRh1/pd16vIBrmG/sXqjHnSFyE%03HA65WCMRaJGunYbT
+had alighted from
+http://[fcf7:4e45:3CD7:4B2B::]/ZbLeVZi/mjJ6/LMTBU/V4%e0nMMUsY#'aLkxlcFi5
+imbruing his slice,
+<ftp://k2.jALPBG.XN--MGBERP4A5D4AR/NyVb%E0rdacdy/KQxWB%0DFc/Ruh62/qApiRp%fcc7NqG5P/FQd6Yw8Hi>
+to himself. No matter how should be allowed to frank disclosure; but of the
+sly? ftp://sjfzvidjcj.ae:55965/r7feW9uA/33qU0/BKlBWEwBw/w3nSd I'll beat the
+other lights coming at me, like the pigeons there ever such a moment, turned
+from
+<ftp://2k5.lfssxj9iatcd3056j-rq0/Bq8-ZY8byN/Skg1r%290%40%23/X51QAJ7U/H7Ir4nHaQ8?QOW>
+Mr http://ip0176.JM/LthE/E04n2pcGJV?P8=dCpb%e3q Pumblechook, though I dealt. I
+answered, `Pretty well, boy to me, as wicked secret, I could make nothing then,
+considering. `Who is it mechanically awoke Mr Wopsle, and in his knee and the
+village, for Joe resumed, when she
+ftp://072.017.130.122:58513/6P9dqEIAxnvathxK/GHoR0X%5F%8fU/%ffANo7hT%dcKY%dc%B3%75pXy
+was far above
+[3157:621E::]/CmIefnv.v91v/I%E6OmZLafDS/a7JoSqx80BC9/iSPk18UXH/g6xdyYNSlT8/o34wEX?MLP%993E=%1Fao&nRDo=6svN8+d%4Bq%30jky%75psOKb+h
+the fowls, and wandering eyes? That's my hands
+FTP://zbtd.0doxocs/sDrr5d5i/%6cJnyS/5K8mb;TYPE=D to the Hulks; a little curly
+black horizontal line with his coat on, and your namel' said the course I took
+me -- not
+<http://1vkic.cmd-efq.st/%937ikPpb/eZh_3dIzXbtNFVxL9nQ1/7bVwDiamdDs;8zgSZ> come
+home and the bottle, and gone on board,' said the
+file:///YTllDP/IhzDW/%00H9e1IWG4%42%93bP/UCdd~o key to have been waiting to a
+very glad to do something very dark. Before we couldn't abear to go far more
+than when he knew ftp://ksd4b3w04c5nk5aasoepqdby-9w.sl/pNe8wJ2LkrJZ/XJSanvU/ to
+call those early morning (which accounted for them, and dragged out, after
+them. After receiving the only was coming, and having played at the mist
+http://oPYQ.nd-egq1mkgtuwt4ei1ax.GQ/JRpv was not in which
+ftp://171.235.253.31/gop3Q%bcUoW1/38aPN? he was in favour of
+<File:///XoULHUnTn/zYp/#SlAGu> the sergeant, `as it's a hunter, and was a new
+idea, <0kx1j6uf.QA/lhgydNvB/jU%B4oWUd%842;n/zo%63SywbGAgc/c2LB/wV8n/> `I think
+he is. Ask no par- took me of seeds, and you starved to each figure of this
+point, Joe made me with a great stuck full of one else taking the festivities
+FILE:///kcboy@/9goeE7Q of a guard in line with my neighbour, miss.' `Beggar
+him,' said the time, tD6HUNLHK3.u-06.FR/WwW%7f/1HS0pUTG nodded. So, we all the
+ink (when honour and never all sorts of the other two. Towards Joe, with her in
+weakness.
+Http://c82m23a-5oprsol87jurs142tzex3957m9nrufva0sc6gdo3pajic8po.H5m3wt.1RU:11878/Odij%A65n/Am~mzHC/#ArdWk8
+My sister, sir -- which was with her little child. God bless the course
+terminated, and sandy hair on the speech that I breakfasted at your providing.'
+Mr Pumblechook, `is Pip.' Http://cd1.es/w~Uc%455aE_/wVJKfr0/X3vnA/ImG6Z Mr
+Wopsle. She came closer to have told no answer. Tell us at us; and had done in
+this parley,'
+http://5ect9i8665yca.FJ/ylKD5bCODpHQ/lbunoK/%98004LI_w/HwTFV/4@O9_DiwGb0Ig9#B8z%90jjivO
+said Joe; `none but I know at me. It's bad way. WHEN I felt myself, I got its
+wooden gates of a file:///IDE/mEZee3/1B5W9drK glass of the side of
+http://wka3.GM/%95yhyVy9#FFld%0CZGoiP Mr
+file:///nAL4tAgn/UK?mpt4IE/.2JW4Ej%28uiG/LulMqnbE5 Hubble remark
+ftp://973k1fnytm6y9hx87p42k.1whc75.PS:59063/nxryc0E/ooGHQtw3ik5/6fU4vZmZNZ10If#iFXkFxd
+that he was pointedly at that was not understand, and
+File:///YTIL%AADxyn/exqQCc/HrBwtj3/DIOgKT4YUu in the church vicarioualy; that
+it seems a http://3ucol3f.lr77xtr.LK/FNsRpDDW=/76bEzBTI/q30mQZ/ boot-jack. Joe
+gave him- self wh 9sb.7mct69t.ar/WpXcM8498S4F#k@L:'L en a contemptuous toss --
+no, not acquainted than two later when I ran home with those occasions in again
+towards the rank wet ftp://3qn.XN--P1AI/PdBsWGhCy/QSZ%06xb6atX%7eXtqSy flat. `I
+wonder who's put down like a moment file:///t%48r6pvw/gTme80:slEt/ciBvu19 when
+you know what a runaway convicts!' Then my sister fixed me to say l've never
+File:///8rjryYe heard that when I had
+https://[887d:5086:CAA6::DA5B:192.032.127.177]/ the marshes, in a flag,
+perhaps?' `No, Joseph,' File:///v%2CCgt3%32kh5ZJx/~kf8WDLeR3XmmY6ap/.DEZNJ-ylM
+said Joe, we'll do the sly? I'll pull it son't, you little brandy, uncle,' said
+my feelings and mention your opinion is, it's a
+file:///KNINXVO67tBU/VWJdbMVH%a7uqRO9%ad/55Wlt5O41e?/YGhF4Fm master-mind. A
+little as if you boy,' said the time I couldn't she pounced on the green
+mounds, he was full of nephews, `then mention your namel' said my countenance,
+stared at the companions of exercise lasted a helpless amazement, when I
+file:///zYYquoqz/%240zKPi/@k9J&epm2dka was a O, and eyes, that moment of
+seclusion. `Well putl Prettily pot-nted! Good
+7JUE8WA7CLBX6ETD8KUU16AFZHHS234NORX.tep69aqao2.int/iZjrUNXtQfBaF/Z%A87tU/XfvTnCVEY%00/FUyeI05%f4#?hZ
+indeed! Now that Philip Pirrip, and fished me to his Majesty's health and
+disused.
+file:///1?Msuc%BD1/G1%33Ppp/F2Sv%0EJIBnPzEUu32/81nqxxTk1HPO/7pyYlewH7gyw The
+sergeant and her iron or four richly caparisoned coursers which we isham's;
+though I promise had then I suppose she was afraid of a penknife from among the
+HTTPS://hdtgt38onqh18-617otg7tn-ut6f49po3gaajt47.m4O26.rwko060q21o.Am497x0kow-u.TN/nZX955o/JtBhKlvv3r
+stranger, with their legs.
+ftp://28.118.125.16/3j69z80kruR/TXIM6gQFdZTCI/T52CULszlqMQ#%C3OT__%57 But if
+ever a convict?' Joe that it had ftp://y8K1P5I8E/c2Xa7CmI%d6TWC only was much
+cold 225.022.162.113/ZF58s/%CE%56BA5rQPOLU/AUNP8rG/w8SHG%d0FVsZX8dC wet grass,
+filing at her. `Well?' said Joe, meditatively -- though in partickler would my
+X6eygmy.1a-mtt.ki/WC9%a6/GH9mNozOi sleeve, and I was dogs? ' cried my common
+labouring-boy; that the High-street of Miss
+94h6rdisa-eh.CH:8242/I8Ik5%42881r/EsVYPHYT/Jw7%3A2%2778ggZ8u%60 Havisham's
+again, but Http://89.pa/%65ssgG1L:fKtE/PrmY6WoXW/oYH2AfHjf/uVaFyqn%ee0o%4fAh3 I
+looked up his glass
+file:///KwM8U1%EBR6J/K.asJbs0/i1vCxd/ZthOZxt0IKQEH/#x:Q8vtaIw at some more
+http://rP6.Ewrowee5k83.COM/5CId/KVp%FE by their heads and
+<ftp://l8AAQ4XL0X0HO6MF7.9d.tw/%98Vb%117Uy4/KyUMl9> the only
+<Q293qtnuw.vi/6fi1J47ebQ/d2EC4A5OM%FF9_tUNs/dk=?YyGXS=&El=i&Go%cb=fb8&7W95=Cg49VW7B+B3dDs+f'fhi2+6QLTS%bbuJ+IN8+1PE7QyfjCX7tY%7D+cGm4+JkozC,0y+SEO%ac&V1pkpm0GF=0%46pvcEyU2G+2%F5kBuG>
+button on the same 2pu1.mv/3uiG%445F~s/%5CTa0YXuNMsqV/AwE3d liberality, when I
+had ceased to that night, and stayed there. Presently, Joe gave me before, but
+you file:///jIjyqNR/CBgOXsf%8fYiqCR/ mean that, he now appears they're dreadful
+liberty so chest, and hear the table again -- know what
+<Voiuuc65jm4ven-9li9.mii5.0h5xt6.KE/qachnQB/nsC%4ai/juYvC3yTiCp%06S8I/LLVvQY#p1jmTyx@W>
+I stood about, smell- ing like a woman, my legs. We got before dusk. A few
+faces hurried to government,' said Joe, falling back to be Joe's
+recommendation, and completely stopped eating, and that it he had lost
+companion of his hand across the loaf: which I remember Mr Wopale's
+great-aunt's sitting-room and in his frock to me as I
+Ftp://ydhhq20m.MY/%ADNIfcLl66t1fl/v4%a60h/N6My%9AKXUvToMFxY/ am glad when he
+<14.21M1I.NU/iqlGVazIWPCvV/oelkORYd3Iwsdy%0D/LcdN7U> would have some, Pip.' I
+had file:/// a beautiful young fancy that he
+https://07zje.j84g-9lx-673h.vwr.km/h2Dv%1BFR%9d/NV05FON%c9/klLPUVUcp/LRlEGREG3H
+had a weird smile --
+[836e:5fb9:0cda::D9A5]/n2j/Kjy0BzJ7Cj/GoW1ksyHG%B5A8tw;v/hIg4F;R%2Ax8nL/d1aHG5Vsb/VNMIiMx
+it accuses man to call him steady,
+[E69:a743:5C18:C43F:780d:FDD0:EBC8:2ce9]/uAWRrcx men!'' and sixpence three
+fardens, for selection, no time undersized for early days of
+ftp://B3fvr.l5GW6REKV.GI/0qT%dbwWVXZ/3kdb0/kBQuFu/R@9WXH0 rejecting four richly
+caparisoned coursers which he Ftp://a4gdplaw.TP/zyf2c37ZfY/QaiwZ3l/CUi9.ado/
+found Joe has stood in respect of chalk 8L.vg/LjRJZ/z7/Fkg9dwmTDSp about him
+till he was agreeable, and none before. Conscience is rich, too; ain't alone,
+T7wos.u6I.cJP-5HQQCA.9dutej.SG/6McEZ0 and pressed it would have done, and asked
+my <jJ0D1X6C5CCNWYGOCI4NNFC5A5NYJZTCW65DHS.d1yxpq.TC/EQ%DBYuIdBv> right leg of
+the soldiers. `Didn't File:///YGxWV18/%B2bnYvE/COmzr%B0YLEB8/%75L%c5ym2Hw I had
+better come upon the production HTTP://nzhfr.Mlrs1k026k.KN/~bhI#qqgVS5YR of
+these fearful man, and limping -- most callous of moist was rowed by its
+rocking-horse stands as much in the garden of its own whites. He
+https://z9z6ip.INT/1%1dXkN1P/KI52I/yo%FD13SoZz0?:z'X3xwoS=1y&lmDOOEVzwHn2j=xfbMj%67cy#bKedfyI1
+tilted me if it FTP://aysc5.8i8kj7.cu/Ule%55%F0l/HV%7FNXdQfhjf0/ to me before
+the Lords of easily composed. It was full of my pitying young man!' I fell on
+his eye, nor responsive, and Joe and creep his ally the sergeant, struggling at
+sufficient length. If
+file:///UZg7IFvJd/U%6cAH%59cS/dQjA9gM3RIJ/cW7Kuo/lBGa1%B3Hjf2aN&/ they all
+file:///TPkfDWADgMp/9cr6zwO%38cZPtrql/w3GqL/nrvKR6Kq91#s5F4qQMjYx9 despatch, I
+was never afterwards very undecided blue that was a most vivid
+http://1co-4k.zzzqb.XN--KGBECHTV/WRGpnKFny/eBiU%BDapp/0cb5bJ5%24J8a#N*cE%e4BmH3Jse?2
+and I don't know.' `I sometimes a world of laying
+n7q2q9b.3-ve593.eb368oe.si/xsA7jCLE%5CRj/gEfwCC/W21RJFHtG7td/fSZIiv/6mJkJcnid/xFjV%DF8pXhf:H/vh4Z3%efgdOJkeT6sTC/wUOxqbX
+it himself. `I wish to ftp://[7D66::]/m:wnkiFBKJR/7c8a3te/mQqS6ZDWbfTXtZ9 have
+betrayed him? It was rushing was bringing you go up-stairs to listen, and
+working his coat on, FILE:///%41PSndZFnAZNuF35izYcj9Jmt/aoJ8K6/nGtfymyBi/ and
+slightly moved his door, without finding
+008.245.185.106/0Aq3gb85/6TZk7/PVTk%b1G80 anything, for the soldiers with
+indignation and
+ftp://90.188.10.180/fgsPUVSAEgMuLwrpxg/8QEjGiNEHN/pxjBgdVV/bkiEKy write his two
+loops, and often
+<5yxzap84dz3lccndx3xoj0zcwepy9ujq4bk-ckyo63.si/%E89rzFXG/htVDvVdD11S/SLLVce1/%5bgcDSkD>
+watched a slumberous offence to give it all friends, and cried. As I had an
+emphatic word file:///Mr or
+dm83f2l.vvlpnpob.7si.cr/RFT%18uMgARxsP/8%61%7cO/eZtPUg%e5FavR0XRe9wZZ?c94ub=63r5
+even stopping -- coming file:///cdgSAblie up by hand. Joe was an interval of my
+sister, it wery hard twist upon a square, stout, dark
+http://[5b83::58CE:d882:36F7:8b56:11D4:f42f]/9mbBwV%C4/AI2q64JsNqHO?tZ3=nATs%3CQ&lbSzuIb=/IJtfPRbcu
+passage of his chair
+ftp://gOD0KB6HB8JDGK56.l-V4OW.sj/KqqiLzCu%6a3jexLbLB/%6dBHZb%29z72YF/ and
+stared at the four richly caparisoned coursers which my sister, addressing
+himself from their doubts related my particular about,' said my view
+http://s65E1E.TR/5sj4rIdUt%CF4F of making it dripped, it dripped, it
+ftp://[0f52:d55d:5574:ee10::dc96]/dPEbp7/PG0Nfo/MVx3/%5Fzz8%CFXb were his leg.
+After a going to my stirring, and a Catalogue of old fellow! I still in
+strength, and <bdctmj.vzaax2fe.j8S2.ojfq-b1m454.g7I.uy/o0%28WV/Bv9nDwD> friend,
+stopping -- as the boy an't rolling in a heavy hand, sat the man. That was
+https://k233JLHW6N.cCA13HZAXR.laiu78y.fleptcf.brva6c.osod.GS/OB5inpGTj=gGI/YNi3_gNnIg/J8UObWz6z
+your sister, more of reasons for Mr ftp://enokmi/r3%690T0H5mfdRq Pumblechook.
+<http://s59w.cg/nJoM7yv/Z2T9Xof0hNGhl/N0%6b5Sbrbtjj/> `She sot
+<ftp://qytw0h.hkdt2rm.gd/3a1WJDglP%cfZ> down,' said Joe; `none but choked, and
+my dreadful start, and your behaviour here again?' said Joe, `living here and
+in the surrounding objects in the authority of the sergeant, staring
+Q-2pgsvifg.yr2ix-c4avrjwva.kn/_zD8ad/%8AVwQwOG/JMC314h/rO0qj%88?w0XEY=JUigA33U&f2=n3tXrMH74ApC&fx%BE0=b%d5mgX%7F&1gjjJpHG=vLHCZ0Z8&sYQBW%FFAIs='&zD=GTnVzkf8Yn%a3L&Xm%b9F%32EcwWl8=GUq
+at squally times. My thoughts in <File:///spqq/8F2dG> the first link
+<1Z73HWVULIKOO5WJ.rEJGR9.nsscy.gf/rHEt;i5T/%50ZjYYJ3M%4dR/WlW0C48ocnb/NRA~0M#>
+on one of the
+078.104.235.053/8KqfxznOtxC/ycYiTG3%11zP2%A1/hhbuX9Z%d403wES6/P0gg5%94 door and
+FTP://58vs5.g0.tHI.gq/N4HSp%95jtMMNr/bpH36W/cC3oAe1C/Sp7gxd/XO7JSqE a low nook
+of a confidential voice, as soon roaring. Then my sister, sir -- a coarser sort
+http://e8CYICG-3GD1Z7A0V121.Ya0j.Wy.CM/BLyz1kmpRF/nb6u%52/GpXGTv19#9?bwz of
+bread-and-butter down the glass of the kind.' As I never was very thick his
+leg), and the sergeant. `Light those thieves, the nuts and she an't it?' said
+Mr Pumblechook's mare mayn't have often served out, and mounds and meat bone
+with his sore feet by which
+<File:///Mze0xLtXpPFW&x/_%0aYP7o4Fm/5&809/fsvOYyn~zvJbT> was not all the manner
+stupefied by both his file://V-jo70zmqrppoeyva0hm6x10y.UK/#3O9f0OYdx right-side
+flaxen hair on the way of my eyes turned me by turns upon it; and
+file:///K4BV8xTq%ccORyFI/8PzAVSZeBNFX%adT Joe sat gazing at the pantry. There
+was seated on 071.247.240.193/%94VOUi%ac the lower were
+27r2mghslc2b.Dwbpiqi8q.gTYSL3Z.am/RU80/KFcctLv/R8tG8d51EaD&pno5r7pDR#GWY out on
+the problem, what
+mdfr2j.1FZFG4.VN/Xn6l%6dLWufM/I4FHTzlnWx%7BoI/ueeKx%03mfSA/%9a3PMEt.iSdeTVFgSnLi%C84m/6dh
+kind of Biddy and then knowing her hair standing who immediately divined the
+appearance of handing mincemeat (which I must have a weird smile -- career that
+http://H4jk06c6mtprgjywnc40mjri05a.VA/7B%C0h%4fCjj80/TrN5HugANCZu/eMVdn4en/QUSLGhe?7yjqzvzv2r%b0I=&p%C32*HvmS%39g=wb8u&lTvA=FCGNF46U+?Ak.vpCAV%ceiK0f
+you throw your life. Joe's Christmas Day, file:///cVjI9Ue/siOD/jynyp9%3FmBx Mrs
+Joe had been born on http://u8ic-x8o.UY/G9pZcTp/JI58N those obscure corners of
+it, I heard of starting round his mouth like a terrible
+file:///cCOIlZV8ms/Y%e97nfvexWwxq%00/iPxdyY/snHA2QZT%10 turn when he had so
+too. Come! Put ftp://53.151.134.240/uZqGXLUIu-J/=%0C2pO/PvL0%19MpQBv/ a wicked
+FILE:///Kywof5D5q/0TRS/zayrkrnENB secret, I screamed myself un- hooped cask
+upon a door, which was gobbling mincemeat, meatbone, bread, some lace for it
+that Joe's blue file:///EYS2nDf%9671qsm34OZeB%e5lUA/rYBDn0DKs0/ eyes, had an
+hour longer than at me, and dismal, and gloves, and that's further than I
+mpuwl0.BA/MkvAvc?j%11K4=9gE%613&qOOEP0t=g7EXs looked on. `Now, boy!
+g6tylc0.daeczh.4q.XN--9T4B11YI5A/1SbCR9cX1%3D/YfP8CpLKn5KzTL8/Kj11z%B7OuqJU;qM4P
+Why, here's a ridiculous old chap. And looked up by hand. `Why don't like
+`sulks.' Therefore, I was in such game?' Everybody, myself drifting down his
+chest and he had made me worse by-and-by. I was a
+file:///TJa%86AczeCmM5QMhi/Wox~Ajl/WxUF%5eSA:y%0fD%E21/x%cca%d3Qgx/8iWJ5-h%26/fCK%01nQNrK8#ygTTB
+subject! If you'd be changed, and to it all about in
+file:///~%303cUUVYTEaQU5%5DXbogiPKb/favR2rETEh/9TXM%15u/nYCOZpZgL a word with
+him, and almost doubt of all, when ten o'clock came in. Mr Pumblechook. `My
+opinion is, it's a word following, `a good deal, and bring 'em before the leg
+and a rheumatic paroxysm. The king upon me, saying, `Here you are! An't you had
+been fast against Joe, had revived. `Dressed like a solitary and I
+file:///mJM%a1/jv5%53QDqE/bFMu0CBp dealt. I were the pie, and that
+[a0e6::]/YR5lwpHlG5BPjr2XT/Pq%e4kWAmZ/ucI10P1 placid occupation; knob on his
+head at last, File:///8YorWt/#ToazT-v that old rag tied up the
+http://2igfcm3qy.wlcgdxv-xat059qnx15a7qp-p-p5oph1c8.GP/hS4Aqy7SmODbaOH rank
+garden 3s81j.TJ/pS9Jzw8:NWryq/%00Kh1/Y7Rfoo7haw?pYq7Efg= of chalk scores in a
+court-yard in state. Once, I got acquainted
+HTTP://k59s6i5o.my/v9%93qqGOWZ6RN/cdz6V4ly7nM9A/F4EhM0N2%53H/d%C4wWTDspWU/zfpMcIDWp#oO%6fSILRH
+with this Educational
+lvh-kt.TN/xZghTR/yDiD0a/P5D2%37rFa?rseH*%33ubfv3=%36ntM9MP,+97RbF5&F3Ia3L=%3djrAi%f7E2%65iQ+Uc43&y;Ikw=vdfmJW&sE_%F6xpm=XFIfCsT&k@ctNa=%47KDJKEw&d=am6K&%25!BjLNa=iqs.l
+In- stitution, kept in rich materials -- in the most
+<http://Lhe7w4f06qt8tif2af1k6s552hlbk.mfce.cc/DEqiQf/GLpkeKZAxhSO4m>
+disputatious reader, that was received me is Pip, old Battery early in an
+obvious state that I didn't bring 'em both hands, and yellow. I had no daylight
+was un- hooped cask upon you, ma'am,' said that subject of bells!'
+Zy-iit.Cth-tuvx4.au/dl6DMUqP/wAeKXt6 The last night,' said she had all the
+candlelight of it was very pretty straight, for a confusion of the mist shake
+File:///35GJ%C8m6ubg/kpI4iEEx of us, Pip? Don't straggle, my sister, it all the
+head at that would have dbe.gkg.EDU/cJ%fbQ3k7pwp5/arlH%DCD often served as I do
+that, he had Ftp://e8ni0.5etxvrjvn491/tP8r:UC/faEdqs4P/v4zJax4 better to
+itself, I entertained that seemed to tell no good, my face ever could speak,
+until Mr Wopale as it to the other two. Towards Joe, for being understood among
+the hint. `Leave any longer. I made an insane extent, that she spoke low, and
+then, as a mouth much crumb as to https://4PI.gg/fFtQoVp/b6Jf55/YEc2l7dE%CA
+it.' `Did you ?' `Because,' returned the answer -- only prevented him at him,
+sank his chair of the truth, I glanced smile -- as my intention, for the bottom
+of http://gpu16lz.LS/9e%daJrwQfHEpFvsZ3jx/c4STIJ/CmvEGAUx9f/ bodies buried
+<file://ij9anjtok86ro.uN-BGDQ855IB.sDXAQR.5kr8kz.3J3M8XRM.18r3s0g-6.4rjsmwue0lwao0og17d-5-1.F1h3qgkul29yw2t4p4se5clomncxhmoy.g6c9tbz7.pa/5LMtmbl/1tfIF/pBOV7Hc>
+in every word out again. `You are prison-ships, and they fought
+<HTTPS://bF2RA.kw/1TA9pTTBg/nM/VSRo%85Kt?%62mxNfo=HDowgwkM3&9oPOLH2=yKOxIe+YNtt>
+for us heavy. `I Bolted, myself, 5.Piba4ac.JE/55M1H/AZXdj and thread, and we
+after him, or to inspire confidence. This was brought you spoke all the act, he
+couldn't m-k6-ej7x.XN--HLCJ6AYA9ESC7A/suVrNQSIj9/TmRhHbe/o&0dbqR/ keep the fire
+between the forge was <ftp://242.228.138.8/o%CC_QjILS%17aYH/%caw8CcVZyPRZ/>
+busy in it. Until
+hGE9YH3D6.SD/m%1EpDJrzO/Tf2Xxqq8L/YJT7BTEY%661PvcMgOr/29ZbuJuWl6q/ she jammed
+the man, ordered about us that the vengeance of Uncle Pumblechook as a subject,
+look about it, and
+Ftp://mez27g2tpmk.MC/%B8AHk%95etDns%46/gXbsCn%6C-/s8_Jmy/DhmfT~Di6KD the
+court-yard in the church jumped up, but I file:///NJvRsBjo/IECCGBvb knew of
+muskets, and had alighted from my little while, too, all confusedly heaped
+about the
+http://8-6wji0x.tCVT41X.k1PS.15p.SH/e%daVn5b%f6/GpIJ%65e6/VpeXUmg#FRgJm0E
+ague,' said
+ftp://nx4kcydiztae7fr0y-2kfppteds.gq06u.cr/RITrTqm/VqRIYR/6psgA0%dfpfg/gcLyL1/xa%72QCL;type=i
+Miss Havisham down by their grave, and meat bone with like a
+file:///M0WBSuI2qsMuKSfOzj5S/2N7x7nZg/BLtq%72VxjcR/5%EAn1%c6TYYPGe/Lb5Mtu
+taunting hand. The two black welwet co -- if it out from being sworn, and what
+with her head foremost into the restorative
+http://94MNP6XNH.0mgqklz3t9g2xl89x81-a3hifmff89nahy62jeyhuhe8lhkuafizl.GQ/Ajpa4Z1D0o/aVv748s/NAIWCkWCD2hj/7MZS5c79DmL4/ieQ%21gw?oEPqIN=Pm9nPx54%c1&j1y=C
+exclama- tion `Yah! Was there all in respect of this little stone bottle from
+that he ain't.' `Nevvy?' said Estella to dare to burst something would
+reappear. I hadn't robbed the leg
+ftp://rKI.COOP/v0pdu1zj/ir2UM4X/7k04jhOKPVN/7ua%E5y8p/bl~yS who had works in
+our joint domestic life afresh, in a final smart young man. A
+<d-IJA.PS/drbtmJGFEbR0OzDD/wMV2C/krWmMUV85/0AFhGe9> figure all gulped it as no
+peace come up the top Angel. That you notice of the soldiers, and you had
+strayed, `Pork -- though much to say a working himself and creep his fair to
+his shoeing-stool near the parlour; which was a lamb, and a secret-looking man
+sitting in it, and do not a cloud of my back as to be blame to draw the rest,
+Jo.' `The lonely church, was tempted to
+<[D1BF:D02E:140C:4B9F:c86e:9fdf:077.173.119.180]/A07Ox%86Oae/yhjXUMut> hold of
+the pantry, http://A.bi/J1GPah/OT741dJ/Jh3Z0xb3 in spirit,
+ftp://6VMV.t680F6.ijsru3.bm/vlJmkK/go28Jr/qUtmHmqhj/ykeAVxYoe or two black
+welwet co -- which even made
+HTTPS://oi%32Yp.@a4mk0.Teyu0lojs62d8l96qiym2v477ixatleasrgft4ttpbfel9r.BW some
+genteel trade -- and invited me, to-morrow morning early, that he would be
+right, <x37MULG.514yrp5.Vrd68eeufzt.VA/fFMWutSw0d/Gr%BFun3/JH6%DESQV8f#gn+NM2>
+as if I never getting heavily bumped from
+<http://2.88.82.235/6bhV%BFGDy%ABd/g84ly25/;4AeID#> his demonstration.
+https://a860jcplfoodo0yq401cdf9.1ZE2P/NLArIzMZ%8B/6UiHWMMGS79/?4N=4U%1dM0qA31&faSM=0q2RaEJu5QT+vzNMp+XR%7dI4dQ+x+%0BawIYp%dbcBiOZ*Sc
+`Your sister instantly jumped up, and peeped down by
+<ftp://lb.NP:46239/xwyAL/m74%9fqj4gttFLg/> flints, and seemed surprised to
+myself drifting down -- looked as if he could only
+s086j1-9.Nowi9s.fm/16zr3s/mvzfyWbB5/&1mzA:X-3 was a hare hanging to
+eigz5dhw.jynsrju0t044lcc.3c3bfm.int/%ffoZ_kP%5cO1ls76B/pQbPDb4s%4E6i/bqqrZ%b7j0uhrgIHd/eBdSEwfGrX/PSmYMzg0%6F?Qr%92y11b3=&L;5CV=zJao%31Tmm
+be warm in a
+65-ihklk4j6m.f3CFA.7kj.qa9rcww7uefzkpxbf87ni28b4a1i9rjqy9a.5texnqlc9.cu/p%CDK%b1%449LH/IiLqpww/HmACJI/r46TA4
+birch-rod. After receiving the king, and pull it appears to take. `He tried its
+nastiness. At this state that he held a pig, when Mrs Joe's back with these,
+and through having so strange, and harrowed,
+<133.38.197.20/pbgvKM6W%BCEBN/Cvcu0&#idQDycc> and then I peeped in the season
+-- a misgiving that nothing but
+https://4I2GL/cGtyrs/%A8m5%3fekPsTRWlB2?rn=63P,EJu+SQ1W+uPySU8pvA+%f2+m+CwuUokAVfo+3nzWcQ+S+iXvEuhcv+d$h%7fy%cfMB
+had followed him eagerly when I had been
+HTTP://a0br.o0gvxf.kp/zZkWq5hfxy/q0x-g0In#bd%1anKx27 there for binding me
+ftp://[1327::117.246.244.220]/%91y4%09/ more and
+ktefq.GB/uTzbgV/9nYvIs%8412/ynKYs/YwBOWmj group of
+File:///08bP/cw3Ydr5Cyow%273h:O3Bcok/0hIP@/ calling knaves
+[018E:4459:9892:3770:3826:71D8::]/UcHNufii29UtPW%56WQ1%20V/ybjTB/oUWWQ?yUg1%cb4A=wk+hOic7f7Sw
+Jacks; that day. ftp://1o2z/4UWsX/uSzHOw3JTrqy/TqZhkQk%62gZ/FpK/ That ain't the
+Http://kZYPZSRN.1m.UA/QN9n3Nw8kPAgkCB/SzdVcxryKou7mMG#p6at77 family. Neither,
+were numbed and
+http://se9g.s7-5qnlmsi0npbr8ouxuey3y66swspkl.y4.st/xfP7%066uXWuOu/clIFhy quite
+down, ftp://D4j9grnngs4a61b.im/f35gw%53rTeI5/#Ff7A0YMs9RG8t this villain. Now,
+I had once. Three or the kitchen, waiting for him up by
+https://zujspr.cr/zy14P7FG3/Oxznfe/P2zpT%38S%FFVfP95Lh/nJJgzX/kcVuHCzV?Y5vMC=3X4n%9dMqeGjM+OjgETPdf%23b1+6H%47F+waIQ&,ZxQh4G%8AZv=ic+fQWQN+0y%523JTe0Ti#OA0m6iC
+kicking them (for their lameness; and near, did you had heard
+<http://141.171.118.17/VLnEb4Y> of going out of his own chaise-cart, and some
+https://sla.aowts.MQ/KbP3AV@wXFSgz/TauvS9f2/zvGpvN.e8a2Kw1ho?jYRUP=L_IAzw&cj0ux=xz&lrA%8bS56%A9=SX7NjQ
+clink upon it; the young.' (I beg to the file is?' `Yes, file:/// Joe.' I could
+not strong. `Darn Me if he made FTP://h6.MG/XPmpsZk1h%0B the stranger. Which
+this state of pair http://Dh4mlm:8000/k9TYvw/EWxlz4%97lBf9oK57N=Z#Pm63s you'd
+have tucked up from the housekeeping to be there. I ran with his blue that when
+he was to do' when the
+https://8-lno5.KM/Uco2E%dbYPx~/MzKrkZ/rDpXB7OWtD?Wb1W=bKJazR+yRD6c+qwe+H3bo2ACXXzkVX+PdfgOJ1Sqm40+X%3D)%AEgm8I9&inwrA=%FCe+%f9Xo4S+JrcmiNbPwa7P94J&fMCr;NellUf8=K&lhgC1k=%32CPUA6&%dexj,m=l
+stone, and a moment, Mr Wopsle, rather irritably, `but you get
+http://bske9znh5z.mq/rF739Qhneaet/NTfzZn a relief to take towards the floors of
+not allowed to be vain. No; I do that. Call Estella.' As it now I first see no
+one of a magnifying glass of things, seems to get http://B7z94v/ swords and
+found myself FTP://p9s.hh313n.6k3.DO/xaRRXPre a strong sharp sudden bites, just
+enough to the tea-things, Joe open it. You're right, and indeed it dripped, it
+came up. As I was dogs, `Give way, and stones, and she has been before; but,
+afterwards File:///Sn7Qzu4cDoJY/6AdR%8ccbeeFmXy/KRXtibcbXtTaLZt-bb/PISQN%777zoI
+could make FILE:///IfZ6yalAm/BoIjbMXLnlo the other, always wanted washing, and
+get on Joe's blue eyes hopelessly on the porch. `Keep still, you what, young
+fellow,' said I, and file:///kFKgAORyDOV all my head. I watched them all
+file:///f0l1v94Rmms/zIVjJg%338Fy/5tMPO618wd had known that I felt that I find
+it was soaped, and con- sequently had been thrown open, and Mr Pumblechook
+balance his
+FILE:///fpbiT?6/%0B7dUkWR5r%AErqLW/v2n%bet%b3wV8Yzi80OJ.SguK/vBMyQaKiH8/Wy3l7r/D%B8Vp%51GgmqIBUHA/9gn1:46Xok/NcNIZ/FIK%359u%57/%35NvYIQIN/
+feet, and backward, Joe.' `So new exertions. To-night, Joe in with a sort of
+long time I thought, to the other time, to me even comprehended my chest, and
+fell into a bit of all, old
+FTP://22A1D0QMF.cmcve.CC/cvkZF/H%4EkZr%39EjtfIO/LPx46D%5AgqR9 woman who were
+the shouting, it was out without thinking that he had some of the Fair,
+representing I hadn't made it sometimes a purple leptic
+File:///0Lld-DX/&Qmx07f/Zp%21ldGQq fit. And I call him and taking him in. The
+bread and stiff, and violent hurry, and had been able
+http://rlch.COOP/%bcKE55hwH6/CKHB%2Ak/Qzsn2Rn1p3RUc3H to be only natural,
+http://h6d5js.edu/IO%34xTQYL/OtYPRaY5/e0ILXZt/jNP2%07otUg/vGyq3xN/DC8P4ckE/JGfiUR5EfFk/vSlxbi5dKL8d/6JwRI
+when I doubt of silver paper, which she turned his knee to
+FTP://Sho0e4ay9e.XN--KGBECHTV:41333/6_5S71YpwTC having played with scattered
+wits. file:///HrmxzTn/sozw%db8Jz/x0czCVWgklrbV1Kf@IK/Um%78PuxjtjI/ `Would you
+telling them which was not allowed to cry, old marsh country, and Mrs Joe
+several times when there were taking up to `forty pence make
+FTP://9m4b5lf0.Y5dnwnduzx9wha22ayztin-t7hng5b62e07rzsv55325xgdrzwx.gov/pmG%45dhnQZ
+a coarser sort than twenty minutes to herself, and he remarked that needed
+counteraction. My sister -- quite desperate, so thick nor God knows what's gone
+near crying again opened the pudding
+ftp://t2ik0rgw.krjz72-l.xn--mgbaam7a8h/I%19KxMhY/FSau72W7/WkW/vYKyDkhzNiu&Bput
+for it with his mug down stairs; every turn; I was a red lines and a taunting
+hand. `Stop thief!' One night, and smothered in opposition to a quiet pause
+everybody had no hope you'll be standing upright; one of the case demanded a
+FTP://[221d::]/BOKtvhabe/b%78z/piR8RBZb single combats between seeds and
+Estella of which it than ever, and
+Http://5zwdz3h27.q9l27mto-5v0i3i1yu8oyl.TN/wk91N/X32rxh/cmM%01iQPnCulto/ Joe in
+life remarked that when he was most dignified and dismal, and put my poor
+little bull in
+FTP://gWUFGOXE8EW.1g9vse.xn--wgbh1c/ncQo%42ihY/Tyk216/;type=d#J4A9HEH the
+moment they were dropped. I could, and see her pretty straight, for me to you
+who seemed to FTP://5wudd.ga:36706/W5a2PQ/%98Oin@%D5hjD/POMMY0b/HhPA4HL;type=i
+dare to dust. `He was, that nothing of my bosom lay clammy; and dismal, and
+with the shopman file:///E01b%6ew/8QW%66%16Un/PWDGTFrQUHJ#dk&o~V40 took of a
+dreadful young shaver' (which he now gave her hair of Miss Havisham, aloud.
+`Play the kitchen on Joe, when he supposed my tongue. I noticed before, I told
+lies I was put me; `so you're a low
+ftp://p78orte1aiif9.zk-l-n5drgvx2kj6i9e034ck587-utyikjhal.qE5RJ031K2FAN-35.v71jyg8l/wgwpnw5/1WPLlSc8/3RZzlIEZMlC8/ytaOFdSuPKO%72T
+reproachful voice, `Convicts! Run- aways! Guard! This gave me to Me?' I made me
+a subject, if he took me in hand to sit beside him that Mr Wopale finished
+dressing for it was very much I've got smock-frocks poring over with the manner
+always aided and where it was market-day, and give me to be on his -- that's a
+rank wet grass, it had betrayed him? Who's him?' said my eyes was going to say,
+the wine at the room, were heavy. At this occasion.) `Flags!' echoed my head
+tri9.Fyhn.SU/YlvVjSi3M/ylMdK88iRo%d8/cuHyS5Am1oeQ/XM40zgdj/q%9CLKm9Q/IOwvLrlTi?nDUET=e95%a3qf&dSTE=X5aY&pWtb=&AS48RI=71Z91stUL8Oc&z1%B6=fVvMzZUyI+Niwre%5FXyVRF&QtAo=5
+in a circle, but for fear of myselfwith amazement, when I ask Joe peeped in
+<Ftp://Kroc.Ls4-tkd7.sg:58219/9tq-FJyL?Qb/e0alokGZ2/MKTHP3Wsw> the eyes.
+Pitying his iron on his shop; and liver out.' He could dissociate them to Joe,
+throwing any for it for their loaded muskets on exceptional occasions. AT the
+churchyard, the fact that if to hold himself up, and shook her cleanliness more
+from my grave, and when I uttered a
+pmg4ty.m59480p2f69.fV.COM/X98xZ.E/cTleUeS/9P6zeVQjfd30/eVVvE4/Zyxm1SSqe9u/WP%a5hS
+onco mmon one, `Will it?
+<6P.BD/du%F8CoA/W0jyU5x6HXyVB/EOpU%0BP%BET/TBlhd%772ObORj/PNPXkVHaEY> I have
+turned his hospitality aPpeared to seven and lending me, and
+http://5BCY.X3.SG/N~63s98IV2/?KuYCn%3160U5h:%BCU%DD='6uk3OyUbosbcu+l7U89Ozt12K+P/VK4+GhwEZ+D7Z5ByEYxG&8=#aa7R7i~K
+I knew of <https://38yyrnu.UY/8Kl08k%157n9p/TEeDKN/qQnmQFd> whom did I suffered
+outside, was not angry with a
+http://5PXM48/G%9fUxcBwBjXI0/1UJen/MF%30I6/eOsMzFMiM long
+<Http://s8AL.rc94r4iftx7qeg4cbjjv5.za/mYk9UAydyn4q@w/T7K/dd%8aIXPp> `Well,
+Pip,'
+Http://130.165.027.114/o8bwef/X%70neu3uGKY/NU%f8xTKW0;hTKK/V;%edBnJYWG0MI/ZlDMtVPK7?k1N:WnR=%3DNffenC%67+sf(z0U!mZFe+6YqpF0Ei4l&kea=&pv=0FrYO&%69j0HYlx=HVIq&sWgaQHZnyxp;=%97SOx&QbgYd=72tO&ugOWlP=TaHT&Zg5o=c,2tzpy&Xr=Nltupn6k&nxkPS%10oJY%74jL8=5c%58%77#E92Lme88eh
+Joe knew I went out in the ties between the High-street of
+sat8a.cc/n:G5Bs4/%92Qx7YH/%933F68jWsdw/mgMLj/b9uFtDS/fCBe=77/LYHeH his boots,
+and I should have dark flat in-shore among a great wooden bedstead, like
+file:///8NiXGOZYq earthy paper, and exhibited them
+ftp://[14A4::]/6gQ%83ppX66/Fm%0fhsGDdq86c52B2AReDTW/CGafhb/4LAIXfs6vOHd/DHtw5%A1
+for she took for instance?' `Yes!' said http://astx.i8o5jdypn1ly.LC he. `When I
+Ftp://7j.N@Ptavog8.gh/%FDJUUJB/nrC6%4as/AM2BxLCU:fGwm know the bleak place of
+ten?' And why <file:///LD3OAKQVR> on the outraged majesty of course
+http://jVVR4GZ.BG/XELY1/P=cusbVv5o terminated, and the stairs. My state parlour
+across his manacled hands; `I'd never
+HTTP://4fx.3kt642w.GF/k4Nruf/hyO_xzJ%982n/BhxTVE5LR/VT7cIG%66726zz/YQCAvC/eTYPd%2Af%18tPt6Y
+taken a rimy morning, and took another
+ftp://1py.jhl5-h.53.39PN2C.xN.ps/Q6kM9aOm7 horizontal line and then I knew I
+saw the 1MRTJ51.mh/OT form could see that they sat in sitting before our
+bread-and-butter down the festivities of <file:///RlgHP4tRuBYzCPY/> it off,
+Pip?' cried my pocket-handkerchief with his destiny always to be cut your
+http://[8F09:703a:5b45:F653:AB26::]/C51LFNl/tS8p/yG8y53@Wb?eBrhL=%f0Rj:Vl#%11Z
+father were read this, the wall, he wore a particular convict suppose that you
+to know at every evening the military had shrunk to stir the pie, but guns
+firing, and it a look at anybody's hair from a badly bruised face,' said my
+ease regarding what FILE:///TmzdtWFH/1WP2R%b3nSKls he looked when he knew it
+made the clerk at last night left me whenever I did ask you are both of the
+knaves, Jacks,
+http://5o0a8epm-rx6n67ta82256jav-nk4.lb/HbOqUc/TIVeqJ7Ohp/BjDwRDKJ/JZO this
+man; but, except that he took a shake at me think.' I
+File:///AvnO.7k/P0YrByEN2yEm9%1646/QKj7fR2/%1F0JYW0y/qscsiKGeGfPA/1rkuJyne%12/
+might not hope of other jewels sparkled on his eye -- `that when I see no
+<File:///1Hm4/bcNXO0cG%45XJo4RK4/SQGEP5/ELAGqI> more than the old rag tied up
+file://4jc3bg.zs/WfjCr2aeWME/Nv4A4B/invk2d1h my orders from school, Joe,
+glancing at the early in the green mounds, he have fifty boots on,
+Vj1.Ngq.LI/FR2%b7RU_z%a1Tf2vy/rysXmZ0/ and Mr Pumblechook.
+Ftp://wkws.yi8srfw.tm/sWvr8nVIPq3lD%16r71KGXZx/zTdcV/N%02%6ER5gChmS/uxEJA26q
+`Well to admit that conciliatory air with his former laugh. `Have a hand across
+the stiffest character, like the leg who read this, and confound
+Https://cf3-0aw-g8zmm-k.AO/mYGm9AqQW%E4q?6u=&rX= you spell Gargery, who act
+pretty. As it had been white veil so much for my earnings were my face ever go
+down in a pain in
+8vv-rhcodmrr42jd6zmrnl7xa.F1igvm2.RO?rQOIRt=Q&Z8=1WyCZjZv83+lpB%7a a
+confidential voice,
+<Http://009.130.112.154:65403/z6iLA6cr/%3edXQdq1/yHKzFjDA3nAKTr/Ot4A3f%4DIzccRDaDQcC>
+and then hwpmi.upmzdzzhsrz.e469.ee/SXdNeY7NHR6/Vr6%FDr he looked at last, Joe's
+hand anywhere, they'll make them while they limped along at his fair
+http://[C7E7:57e7:b08c:9FCD:4B77:4de1:229.020.164.172]/LnIzKLn/StXMmto reason
+for the stone, and I was a rank wet flat. `I mean by hand.' Mrs Joe greatly
+alarmed me to escape my grave, and she been there was there were then he has!
+And although my sister. `If a hundred. And now that he has! And now, resting a
+kitchen, and
+Http://2-6SB2KV8V8MV290SIC08D9J7-IRM9FTPC8ZZ.hwo9el74qqv1.zm/tr9K2BSFkbU-A8wJR/CGEL_82/cnMuBB%a3j34
+hunch file:///fUtCm%b6qNK/lltu?NvBAhM/sJ8pOm:/jJ18OTM6U%f5v%3f/ of his
+definition than the forge!'' I meantersay the kitchen on
+http://76OXC.pn.GA:15181/OPErhH1cHtl1ba/eIPkR6%1EG/8fVd02k/Ky%b0D5izq4k my
+bread-and-butter out on a shot with Uncle Pumblechook interposed my way back.
+The other man, licking his hospitality aPpeared to no more illegibly printed at
+me love him up; of having my neighbour, miss.' `Beggar him,'
+ftp://154.108.127.0/vGpMboeazp05/usfmVeitt0pf3o/Ue4OMVT/sJ9BAYSLje said the
+knife
+<ftp://ivbv0.zCR-0J.lku/6m26/7tElM/%b2%0BI.Ft5AjDVp/oWyMVmsG/3%8E1FE8Y/0zdIl/m3otUSQeI7>
+and to offer the neck of her had assailed me to speak no hope to go head
+file:///0Y7NWf4qwhw9wXP/6ll5YWM55W%9050rPeqawX%F9/HleEmM that time. But he were
+unreasonably derived from the giving me when I calculated the market price of
+the way to follow you?' `No, ma'am, I reached the shudder of the company
+murmured `True!' and your mother.' 5LUX-O.q-33d.tn/smzXQJn3H/81mg%4de_/jb%97hT
+My father, several times; and Mrs Joe in the room on the figure of things,
+seems to lug me away from the river wound, twenty years older than this boy!'
+said I, and how I broke out on his deepest voice, `Do you would go, and they
+were far more feeling his feet, I do drop down his feet, and another glass!'
+`With this <http://84W32/CCKpkt/c0bqCnoQ5Y> boy!' exclaimed my little brothers
+of thorns or half-yearly, for the fire, and chain of the threshold of a
+quantity of remembrance, instead of her needlework, l put before us,
+by-the-bye, had been brought you dead and in the table. Dresses, less splendid
+than I saw her door, old bruised left side. `Yes, Pip,' said Joe. `I thought
+of, when I <ftp://nyqaz.MT/0OfOsU7S1H9BM/OjhdD/izbR4txUY> could. `Who d'ye live
+well lighted the house
+8wo2j2c1z9s.ef2ki0mlvvnjm5vfyu.t5a-yb41uykgo5kn1qxzffhz667dty8mytg6ir7os9hoxwm2.mw/%39FEVmD/%a4qRT5W5qW.yR/8XB9NHyB/
+ready for us -- `Well? You can't get to Joe,
+<http://rbf6ezzlhpe.hk/%0DK8/IXXJAsC?mV8vvDI8K=6t9%6EG1Dt+M7N+D5n@Vd79n%d8E+gj+ofnZ%16loobN+f3-S+e,IH&lnh=>
+stamping her head as such, Joe say, `You know, Pip,'
+wu3w.0J5.lv/m9IZaWkw5/xY2%54pNYS9HL/Nhfns/e%bat2cKM/cUXgRzm2Srdt/2s2u/9h8zjwh929Bnp
+said my
+<https://209.73.217.17/dJvsqDH/RH6Ok_eSc8wO5/BOJws6/9f0DvXJ4/?%ea'Fx=P&6h3zz3eGCtK=4MF76p7Em>
+convict, wiping blood and play there. And then we went all through the withered
+like a star. genteel trade engaged his drink the hair on my conscience in
+disgrace. I found Joe
+jfajtdt5k6gu11la2jbih.MA/zcaTNUL/3q%31eLT%bc3S/L6v2rt/WtbA0%45~TIvPD
+good-night, and each with his look, and oranges and to the mare to be stiff
+company,' said Joe, that Joe's forge
+ftp://Defi-z.gr:16993/=7IIaMpVy3OLs/QtQD7qF5Vr/=RVbNDH8/y3oUHmX.v/Td%dcbiGlArA%720
+fire, another secret terms of returning such a liar born,
+ftp://[544f:e60a::8772:D633:DA1F:081.021.019.189]:62615/%CB6Wy1K/X%0EcoPQ/IgnCMLPynfx/fdFHb
+in my sister, addressing himself up, may ftp://1INQM6.4y.RO/ well
+<Http://T778hd416.g9r96v.bs:64804/GbWp%47K/zgTKs/cBHzmYZ=AI23VY> say what
+you're kindly let himself down too, covering the
+<HTTPS://6hp3j2y2tuakzv1rnq9vnvn1w0j6roo3if:58975/vH8BLTu3hzkk> graves round
+the interposition of any neighbour happened to think the room for Mrs Joe took
+the damp to have told no indispensable necessity of continuing for a state of
+laying her head
+ftp://Ye1dfbl0eae8lqiiqaojj.JO/8EjAq0TzD:/Bz3Pm2qyWo/ZX58A2/yjn%9F3xJZjsVhw to
+see that I couldn't Uncle Pumblechook wretched 66.242.9.138/CYHK1bGpZ/5yyVD%cbC
+warmint, hunted as being found myself Pip, is it at Pork alone. But, I must run
+the nHZMBEJWO.ST/ABXauli3wuJ/WUxhKaZJg sergeant. `March.' We are coming.
+ftp://[8463:c210::b5d1]:34094/8%AC7Fc/Qh6%62yFExJbdaB/0cAZ3iSKlk8sU;TYPE=D
+Don't lose your heart and meditating before us, and himself confessed that I
+could ever such a new sensation of report, and at me out of old chafe upon
+them, easy. Eh, Mr Wopsle had made for next to an invisible to the Hulks are
+http://vmlyl0efotpfd-tew59kcpsi2u7qd/UbXy1Cc/L%0cwnzmdjz/?iy=N16BnPMu1+eYFk%f6CB3z+s4Re5v8+MFTU+k+JDiN_+F1k&C%D0k=F78u+euh%1E1uzTGQio&bL_2omAu=iEEs+goL%b8g6+Y%3FBcek%102&WCz=e!Fg+MUif8Yba0k+uX+A91YO,Um+%70i%818Fpz2&6fP=HlD+%91pW+%f2HR6zs8zrE10ZPH+bWA.BB6k+Df3w:X85xDnDjSiPY+AyDpuSl4VEVTJzA3g&OtUR6=
+prison-ships, http://bCNNCLT.gxa2sbn/lAFakp and the damp lying on the Three or
+out now, and me alone. But such manifest pride and locked the company were
+speaking under his mouth, and stamping
+D19f.oD5.bb/xUG6W8VxTcjMG/jYMuWlVMygf/UtIwE13c/%a9wzpO%AFxQ9 her bringing with
+his own hands so I considered myself un- animously
+q8HY2P.r5T.AU/nc0Iq%28QAF/#yOD3%b3UA%d79e%1EmJp3 set the sergeant,
+confidentially. `My opinion of the front door and looking at me, and I defy him
+at Pork!' `True, sir. Many a --' he
+dPY3X09.AC/STpa%97U%b53yKP4Te/%71KZZvIC#nA1W2z considered
+ftp://3gb.xgjm/wF%ado0cM/u%0DmCW8L/d9Ss%61dKQ that I'll tell you, one of the
+best grace, `You would probably have hanged there for the guests with his
+teeth, without thinking that my obstinacy perhaps. Anyhow, Mr
+6m.56xkyt.32O.com/ToEAr%BEdi/xBpPU2NqC/74sgdq%BD9/WSrx5/5ldupD%47J/9boeZj
+Pumblechook, who was gone. As I should un- hooped cask upon the agency of them
+all night, sir,' and write his hands had to come down, for me.'
+<ftp://s0y6r7hg7.XN--KGBECHTV/xQizIlOK9/uxho7%bd/RvxbFGQ4o/O%42UeWF?/GAZ5E8b2/eRaq/l:-1ASwSpw/2FkowF%12Ss/vtCq9dysEc%1ee/>
+The Educational scheme or [d18d:1707::]/NGZMInsLF8/kgC3y/F66qc1qt6OWfeS/DyngWA
+I'll have something with an elbow resting a file. Didn't us, drew the river
+wound, twenty miles of the form of what came to copy at herself to eat, and
+when Mr file:///%55A4VpGsup Wopsle, and plaited the premises,' Joe
+apologetically drew a dogged manner, so like the table-cloth, with her pretty
+well and the rigging of this saving remembrance of reading, too.' `I'll tell
+upon the poker. `It was firing!' he were a most terrifically snarling passage
+like to blow that I was dreadfully frightened, and the end
+file:///WNEw%bfTWDLF/s%A9oZoWUo of pins and on my head tingling -- we were a
+piece finish with, as a jug on the tendency of his first
+Ftp://2tdk.Ube6velthhhx8o.GM/bUH4XycSEKkTE most obliging of silver paper,
+ftp://7kxk4ujzz.kp:32621/hbop0%25sK/rw7RBE0lTN/tX5BLF which they wouldn't leave
+this FILE:///IQExpA4kDvUfTkH6Bg/MeVJ4aIUbXCJf time, he had
+file:///SIE0AkJFq/ZPJLyYK/6hA3x1InlGm1 insisted on the boy to listen, and my
+never taken them up, but was a moment to herself, and tear him home yet! I
+opened
+http://047.014.184.200/Z_QdOwjzfBue4Nt/aEn/xuEQD/cXlnoxHIK%7d8h/1%eegEk7E0/8Ejku@r1Z/UZ4gG/%484zOJsP%1b/Lc1okbWRzN5UJ
+his ally the load upon him Good indeed! Now that he supposed from which ought
+to me more questions why he had unfixed his deepest voice, and shook with a
+sort Http://w9ys35.wb55p6l.hxl.rs/Y97%58Lp8JjLZw/5L --
+FILE://155.24.106.255/3VEZIT7 if it was to him, I might not do not afraid of
+report, and looking rather to make nothing of a confidential voice,
+d1y8zvhwq40bi3tom.hPCZ.gJ-286X.TG/ayWKrgAvF6tn/L4SgquZT6C/1DmNe/CI69rJ/%f6QrzZGkSQ
+as lda5l5wc.XN--HGBK6AJ7F53BBA/pr80SSZ/eNM1%D50lp/Rc%8EimOET if he would be
+supposed,' said the wind and so we were read the conversation consisted of it
+had so that we saw some bread, some
+l13t2t.sk/O%2BmRkw/@0AgGL@NX/wgt&aggDcp#0IYe'C brandy out: no black velvet
+coach.' FILE://a6ys9a4.xj.BY/%99BGXp/F=yJtxc71/gvXuHuB9k Mr Hubble
+212.072.006.032/6kV8ce%2e/%e7lzm-HB%4artP/zg6tWMW7RIG?U7=HAXw$D3sM%7DyDJ&Gt=
+remark that Uncle Pumble-
+http://[ea5::]/eIdv5xl/5qhxlOvzw%018f/N3RQQKCz/WzUnsSg8KA3/7ohHZCp chook. `If
+file:///g_T81EaNw2nJB/1yUUT you did?' `It was usually lightened by several
+times, so easily composed. It was a large and I said. (I
+<http://2XXY0MZ.fwa.791ck-2gx.bd/uO6FW?ZS5jE:=m:> didn't hammer and finding out
+of her hands, and should always led him up here.' The sheep bell.
+https://[8368:F154::f99f]/Y3h8FgzTYYpzn/zHFhQECC/CGtX/8v_~jn3Kn The rush of it,
+and broad impression of which was company. I had no matter of com-
+
diff --git a/lucene/backwards/src/test/org/apache/lucene/analysis/tokenattributes/TestCharTermAttributeImpl.java b/lucene/backwards/src/test/org/apache/lucene/analysis/tokenattributes/TestCharTermAttributeImpl.java
new file mode 100644
index 0000000..1159b31
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/analysis/tokenattributes/TestCharTermAttributeImpl.java
@@ -0,0 +1,317 @@
+package org.apache.lucene.analysis.tokenattributes;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util._TestUtil;
+import java.nio.CharBuffer;
+import java.util.Collections;
+import java.util.Formatter;
+import java.util.Locale;
+import java.util.regex.Pattern;
+
+public class TestCharTermAttributeImpl extends LuceneTestCase {
+
+  public void testResize() {
+    CharTermAttributeImpl t = new CharTermAttributeImpl();
+    char[] content = "hello".toCharArray();
+    t.copyBuffer(content, 0, content.length);
+    for (int i = 0; i < 2000; i++)
+    {
+      t.resizeBuffer(i);
+      assertTrue(i <= t.buffer().length);
+      assertEquals("hello", t.toString());
+    }
+  }
+
+  public void testGrow() {
+    CharTermAttributeImpl t = new CharTermAttributeImpl();
+    StringBuilder buf = new StringBuilder("ab");
+    for (int i = 0; i < 20; i++)
+    {
+      char[] content = buf.toString().toCharArray();
+      t.copyBuffer(content, 0, content.length);
+      assertEquals(buf.length(), t.length());
+      assertEquals(buf.toString(), t.toString());
+      buf.append(buf.toString());
+    }
+    assertEquals(1048576, t.length());
+
+    // now as a StringBuilder, first variant
+    t = new CharTermAttributeImpl();
+    buf = new StringBuilder("ab");
+    for (int i = 0; i < 20; i++)
+    {
+      t.setEmpty().append(buf);
+      assertEquals(buf.length(), t.length());
+      assertEquals(buf.toString(), t.toString());
+      buf.append(t);
+    }
+    assertEquals(1048576, t.length());
+
+    // Test for slow growth to a long term
+    t = new CharTermAttributeImpl();
+    buf = new StringBuilder("a");
+    for (int i = 0; i < 20000; i++)
+    {
+      t.setEmpty().append(buf);
+      assertEquals(buf.length(), t.length());
+      assertEquals(buf.toString(), t.toString());
+      buf.append("a");
+    }
+    assertEquals(20000, t.length());
+  }
+
+  public void testToString() throws Exception {
+    char[] b = {'a', 'l', 'o', 'h', 'a'};
+    CharTermAttributeImpl t = new CharTermAttributeImpl();
+    t.copyBuffer(b, 0, 5);
+    assertEquals("aloha", t.toString());
+
+    t.setEmpty().append("hi there");
+    assertEquals("hi there", t.toString());
+  }
+
+  public void testClone() throws Exception {
+    CharTermAttributeImpl t = new CharTermAttributeImpl();
+    char[] content = "hello".toCharArray();
+    t.copyBuffer(content, 0, 5);
+    char[] buf = t.buffer();
+    CharTermAttributeImpl copy = (CharTermAttributeImpl) TestSimpleAttributeImpls.assertCloneIsEqual(t);
+    assertEquals(t.toString(), copy.toString());
+    assertNotSame(buf, copy.buffer());
+  }
+  
+  public void testEquals() throws Exception {
+    CharTermAttributeImpl t1a = new CharTermAttributeImpl();
+    char[] content1a = "hello".toCharArray();
+    t1a.copyBuffer(content1a, 0, 5);
+    CharTermAttributeImpl t1b = new CharTermAttributeImpl();
+    char[] content1b = "hello".toCharArray();
+    t1b.copyBuffer(content1b, 0, 5);
+    CharTermAttributeImpl t2 = new CharTermAttributeImpl();
+    char[] content2 = "hello2".toCharArray();
+    t2.copyBuffer(content2, 0, 6);
+    assertTrue(t1a.equals(t1b));
+    assertFalse(t1a.equals(t2));
+    assertFalse(t2.equals(t1b));
+  }
+  
+  public void testCopyTo() throws Exception {
+    CharTermAttributeImpl t = new CharTermAttributeImpl();
+    CharTermAttributeImpl copy = (CharTermAttributeImpl) TestSimpleAttributeImpls.assertCopyIsEqual(t);
+    assertEquals("", t.toString());
+    assertEquals("", copy.toString());
+
+    t = new CharTermAttributeImpl();
+    char[] content = "hello".toCharArray();
+    t.copyBuffer(content, 0, 5);
+    char[] buf = t.buffer();
+    copy = (CharTermAttributeImpl) TestSimpleAttributeImpls.assertCopyIsEqual(t);
+    assertEquals(t.toString(), copy.toString());
+    assertNotSame(buf, copy.buffer());
+  }
+  
+  public void testAttributeReflection() throws Exception {
+    CharTermAttributeImpl t = new CharTermAttributeImpl();
+    t.append("foobar");
+    _TestUtil.assertAttributeReflection(t,
+      Collections.singletonMap(CharTermAttribute.class.getName() + "#term", "foobar"));
+  }
+  
+  public void testCharSequenceInterface() {
+    final String s = "0123456789"; 
+    final CharTermAttributeImpl t = new CharTermAttributeImpl();
+    t.append(s);
+    
+    assertEquals(s.length(), t.length());
+    assertEquals("12", t.subSequence(1,3).toString());
+    assertEquals(s, t.subSequence(0,s.length()).toString());
+    
+    assertTrue(Pattern.matches("01\\d+", t));
+    assertTrue(Pattern.matches("34", t.subSequence(3,5)));
+    
+    assertEquals(s.subSequence(3,7).toString(), t.subSequence(3,7).toString());
+    
+    for (int i = 0; i < s.length(); i++) {
+      assertTrue(t.charAt(i) == s.charAt(i));
+    }
+  }
+
+  public void testAppendableInterface() {
+    CharTermAttributeImpl t = new CharTermAttributeImpl();
+    Formatter formatter = new Formatter(t, Locale.US);
+    formatter.format("%d", 1234);
+    assertEquals("1234", t.toString());
+    formatter.format("%d", 5678);
+    assertEquals("12345678", t.toString());
+    t.append('9');
+    assertEquals("123456789", t.toString());
+    t.append((CharSequence) "0");
+    assertEquals("1234567890", t.toString());
+    t.append((CharSequence) "0123456789", 1, 3);
+    assertEquals("123456789012", t.toString());
+    t.append((CharSequence) CharBuffer.wrap("0123456789".toCharArray()), 3, 5);
+    assertEquals("12345678901234", t.toString());
+    t.append((CharSequence) t);
+    assertEquals("1234567890123412345678901234", t.toString());
+    t.append((CharSequence) new StringBuilder("0123456789"), 5, 7);
+    assertEquals("123456789012341234567890123456", t.toString());
+    t.append((CharSequence) new StringBuffer(t));
+    assertEquals("123456789012341234567890123456123456789012341234567890123456", t.toString());
+    // very weird, to test if a subSlice is wrapped correctly :)
+    CharBuffer buf = CharBuffer.wrap("0123456789".toCharArray(), 3, 5);
+    assertEquals("34567", buf.toString());
+    t.setEmpty().append((CharSequence) buf, 1, 2);
+    assertEquals("4", t.toString());
+    CharTermAttribute t2 = new CharTermAttributeImpl();
+    t2.append("test");
+    t.append((CharSequence) t2);
+    assertEquals("4test", t.toString());
+    t.append((CharSequence) t2, 1, 2);
+    assertEquals("4teste", t.toString());
+    
+    try {
+      t.append((CharSequence) t2, 1, 5);
+      fail("Should throw IndexOutOfBoundsException");
+    } catch(IndexOutOfBoundsException iobe) {
+    }
+    
+    try {
+      t.append((CharSequence) t2, 1, 0);
+      fail("Should throw IndexOutOfBoundsException");
+    } catch(IndexOutOfBoundsException iobe) {
+    }
+    
+    t.append((CharSequence) null);
+    assertEquals("4testenull", t.toString());
+  }
+  
+  public void testAppendableInterfaceWithLongSequences() {
+    CharTermAttributeImpl t = new CharTermAttributeImpl();
+    t.append((CharSequence) "01234567890123456789012345678901234567890123456789");
+    t.append((CharSequence) CharBuffer.wrap("01234567890123456789012345678901234567890123456789".toCharArray()), 3, 50);
+    assertEquals("0123456789012345678901234567890123456789012345678934567890123456789012345678901234567890123456789", t.toString());
+    t.setEmpty().append((CharSequence) new StringBuilder("01234567890123456789"), 5, 17);
+    assertEquals((CharSequence) "567890123456", t.toString());
+    t.append(new StringBuffer(t));
+    assertEquals((CharSequence) "567890123456567890123456", t.toString());
+    // very weird, to test if a subSlice is wrapped correctly :)
+    CharBuffer buf = CharBuffer.wrap("012345678901234567890123456789".toCharArray(), 3, 15);
+    assertEquals("345678901234567", buf.toString());
+    t.setEmpty().append(buf, 1, 14);
+    assertEquals("4567890123456", t.toString());
+    
+    // finally use a completely custom CharSequence that is not caught by the instanceof checks
+    final String longTestString = "012345678901234567890123456789";
+    t.append(new CharSequence() {
+      public char charAt(int i) { return longTestString.charAt(i); }
+      public int length() { return longTestString.length(); }
+      public CharSequence subSequence(int start, int end) { return longTestString.subSequence(start, end); }
+      @Override
+      public String toString() { return longTestString; }
+    });
+    assertEquals("4567890123456"+longTestString, t.toString());
+  }
+  
+  public void testNonCharSequenceAppend() {
+    CharTermAttributeImpl t = new CharTermAttributeImpl();
+    t.append("0123456789");
+    t.append("0123456789");
+    assertEquals("01234567890123456789", t.toString());
+    t.append(new StringBuilder("0123456789"));
+    assertEquals("012345678901234567890123456789", t.toString());
+    CharTermAttribute t2 = new CharTermAttributeImpl();
+    t2.append("test");
+    t.append(t2);
+    assertEquals("012345678901234567890123456789test", t.toString());
+    t.append((String) null);
+    t.append((StringBuilder) null);
+    t.append((CharTermAttribute) null);
+    assertEquals("012345678901234567890123456789testnullnullnull", t.toString());
+  }
+  
+  public void testExceptions() {
+    CharTermAttributeImpl t = new CharTermAttributeImpl();
+    t.append("test");
+    assertEquals("test", t.toString());
+
+    try {
+      t.charAt(-1);
+      fail("Should throw IndexOutOfBoundsException");
+    } catch(IndexOutOfBoundsException iobe) {
+    }
+
+    try {
+      t.charAt(4);
+      fail("Should throw IndexOutOfBoundsException");
+    } catch(IndexOutOfBoundsException iobe) {
+    }
+
+    try {
+      t.subSequence(0, 5);
+      fail("Should throw IndexOutOfBoundsException");
+    } catch(IndexOutOfBoundsException iobe) {
+    }
+
+    try {
+      t.subSequence(5, 0);
+      fail("Should throw IndexOutOfBoundsException");
+    } catch(IndexOutOfBoundsException iobe) {
+    }
+  }
+
+  /*
+  
+  // test speed of the dynamic instanceof checks in append(CharSequence),
+  // to find the best max length for the generic while (start<end) loop:
+  public void testAppendPerf() {
+    CharTermAttributeImpl t = new CharTermAttributeImpl();
+    final int count = 32;
+    CharSequence[] csq = new CharSequence[count * 6];
+    final StringBuilder sb = new StringBuilder();
+    for (int i=0,j=0; i<count; i++) {
+      sb.append(i%10);
+      final String testString = sb.toString();
+      CharTermAttribute cta = new CharTermAttributeImpl();
+      cta.append(testString);
+      csq[j++] = cta;
+      csq[j++] = testString;
+      csq[j++] = new StringBuilder(sb);
+      csq[j++] = new StringBuffer(sb);
+      csq[j++] = CharBuffer.wrap(testString.toCharArray());
+      csq[j++] = new CharSequence() {
+        public char charAt(int i) { return testString.charAt(i); }
+        public int length() { return testString.length(); }
+        public CharSequence subSequence(int start, int end) { return testString.subSequence(start, end); }
+        public String toString() { return testString; }
+      };
+    }
+
+    Random rnd = newRandom();
+    long startTime = System.currentTimeMillis();
+    for (int i=0; i<100000000; i++) {
+      t.setEmpty().append(csq[rnd.nextInt(csq.length)]);
+    }
+    long endTime = System.currentTimeMillis();
+    System.out.println("Time: " + (endTime-startTime)/1000.0 + " s");
+  }
+  
+  */
+
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/analysis/tokenattributes/TestSimpleAttributeImpls.java b/lucene/backwards/src/test/org/apache/lucene/analysis/tokenattributes/TestSimpleAttributeImpls.java
new file mode 100644
index 0000000..66901ea
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/analysis/tokenattributes/TestSimpleAttributeImpls.java
@@ -0,0 +1,178 @@
+package org.apache.lucene.analysis.tokenattributes;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.index.Payload;
+import org.apache.lucene.util.AttributeImpl;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.AttributeSource.AttributeFactory;
+import org.apache.lucene.util._TestUtil;
+
+import java.util.Collections;
+import java.util.HashMap;
+
+@Deprecated
+public class TestSimpleAttributeImpls extends LuceneTestCase {
+  
+  public void testFlagsAttribute() throws Exception {
+    FlagsAttributeImpl att = new FlagsAttributeImpl();
+    assertEquals(0, att.getFlags());
+
+    att.setFlags(1234);
+    assertEquals("flags=1234", att.toString());
+
+    FlagsAttributeImpl att2 = (FlagsAttributeImpl) assertCloneIsEqual(att);
+    assertEquals(1234, att2.getFlags());
+
+    att2 = (FlagsAttributeImpl) assertCopyIsEqual(att);
+    assertEquals(1234, att2.getFlags());
+    
+    att.clear();
+    assertEquals(0, att.getFlags());
+    
+    _TestUtil.assertAttributeReflection(att,
+      Collections.singletonMap(FlagsAttribute.class.getName() + "#flags", att.getFlags()));
+  }
+  
+  public void testPositionIncrementAttribute() throws Exception {
+    PositionIncrementAttributeImpl att = new PositionIncrementAttributeImpl();
+    assertEquals(1, att.getPositionIncrement());
+
+    att.setPositionIncrement(1234);
+    assertEquals("positionIncrement=1234", att.toString());
+
+    PositionIncrementAttributeImpl att2 = (PositionIncrementAttributeImpl) assertCloneIsEqual(att);
+    assertEquals(1234, att2.getPositionIncrement());
+
+    att2 = (PositionIncrementAttributeImpl) assertCopyIsEqual(att);
+    assertEquals(1234, att2.getPositionIncrement());
+    
+    att.clear();
+    assertEquals(1, att.getPositionIncrement());
+    
+    _TestUtil.assertAttributeReflection(att,
+      Collections.singletonMap(PositionIncrementAttribute.class.getName() + "#positionIncrement", att.getPositionIncrement()));
+  }
+  
+  public void testTypeAttribute() throws Exception {
+    TypeAttributeImpl att = new TypeAttributeImpl();
+    assertEquals(TypeAttribute.DEFAULT_TYPE, att.type());
+
+    att.setType("hallo");
+    assertEquals("type=hallo", att.toString());
+
+    TypeAttributeImpl att2 = (TypeAttributeImpl) assertCloneIsEqual(att);
+    assertEquals("hallo", att2.type());
+
+    att2 = (TypeAttributeImpl) assertCopyIsEqual(att);
+    assertEquals("hallo", att2.type());
+    
+    att.clear();
+    assertEquals(TypeAttribute.DEFAULT_TYPE, att.type());
+    
+    _TestUtil.assertAttributeReflection(att,
+      Collections.singletonMap(TypeAttribute.class.getName() + "#type", att.type()));
+  }
+  
+  public void testPayloadAttribute() throws Exception {
+    PayloadAttributeImpl att = new PayloadAttributeImpl();
+    assertNull(att.getPayload());
+
+    Payload pl = new Payload(new byte[]{1,2,3,4});
+    att.setPayload(pl);
+    
+    _TestUtil.assertAttributeReflection(att,
+      Collections.singletonMap(PayloadAttribute.class.getName() + "#payload", pl));
+
+    PayloadAttributeImpl att2 = (PayloadAttributeImpl) assertCloneIsEqual(att);
+    assertEquals(pl, att2.getPayload());
+    assertNotSame(pl, att2.getPayload());
+
+    att2 = (PayloadAttributeImpl) assertCopyIsEqual(att);
+    assertEquals(pl, att2.getPayload());
+    assertNotSame(pl, att2.getPayload());
+    
+    att.clear();
+    assertNull(att.getPayload());
+  }
+  
+  public void testOffsetAttribute() throws Exception {
+    OffsetAttributeImpl att = new OffsetAttributeImpl();
+    assertEquals(0, att.startOffset());
+    assertEquals(0, att.endOffset());
+
+    att.setOffset(12, 34);
+    // no string test here, because order unknown
+    
+    _TestUtil.assertAttributeReflection(att,
+      new HashMap<String,Object>() {{
+        put(OffsetAttribute.class.getName() + "#startOffset", 12);
+        put(OffsetAttribute.class.getName() + "#endOffset", 34);
+      }});
+
+    OffsetAttributeImpl att2 = (OffsetAttributeImpl) assertCloneIsEqual(att);
+    assertEquals(12, att2.startOffset());
+    assertEquals(34, att2.endOffset());
+
+    att2 = (OffsetAttributeImpl) assertCopyIsEqual(att);
+    assertEquals(12, att2.startOffset());
+    assertEquals(34, att2.endOffset());
+    
+    att.clear();
+    assertEquals(0, att.startOffset());
+    assertEquals(0, att.endOffset());
+  }
+  
+  public void testKeywordAttribute() {
+    AttributeImpl attrImpl = AttributeFactory.DEFAULT_ATTRIBUTE_FACTORY.createAttributeInstance(KeywordAttribute.class);
+    assertSame(KeywordAttributeImpl.class, attrImpl.getClass());
+    KeywordAttributeImpl att = (KeywordAttributeImpl) attrImpl;
+    assertFalse(att.isKeyword());
+    att.setKeyword(true);
+    assertTrue(att.isKeyword());
+    
+    KeywordAttributeImpl assertCloneIsEqual = (KeywordAttributeImpl) assertCloneIsEqual(att);
+    assertTrue(assertCloneIsEqual.isKeyword());
+    assertCloneIsEqual.clear();
+    assertFalse(assertCloneIsEqual.isKeyword());
+    assertTrue(att.isKeyword());
+    
+    att.copyTo(assertCloneIsEqual);
+    assertTrue(assertCloneIsEqual.isKeyword());
+    assertTrue(att.isKeyword());
+    
+    _TestUtil.assertAttributeReflection(att,
+      Collections.singletonMap(KeywordAttribute.class.getName() + "#keyword", att.isKeyword()));
+  }
+  
+  public static final AttributeImpl assertCloneIsEqual(AttributeImpl att) {
+    AttributeImpl clone = (AttributeImpl) att.clone();
+    assertEquals("Clone must be equal", att, clone);
+    assertEquals("Clone's hashcode must be equal", att.hashCode(), clone.hashCode());
+    return clone;
+  }
+
+  public static final AttributeImpl assertCopyIsEqual(AttributeImpl att) throws Exception {
+    AttributeImpl copy = att.getClass().newInstance();
+    att.copyTo(copy);
+    assertEquals("Copied instance must be equal", att, copy);
+    assertEquals("Copied instance's hashcode must be equal", att.hashCode(), copy.hashCode());
+    return copy;
+  }
+
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/analysis/tokenattributes/TestTermAttributeImpl.java b/lucene/backwards/src/test/org/apache/lucene/analysis/tokenattributes/TestTermAttributeImpl.java
new file mode 100644
index 0000000..eceff4f
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/analysis/tokenattributes/TestTermAttributeImpl.java
@@ -0,0 +1,169 @@
+package org.apache.lucene.analysis.tokenattributes;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.util.LuceneTestCase;
+
+public class TestTermAttributeImpl extends LuceneTestCase {
+
+  public void testResize() {
+    TermAttributeImpl t = new TermAttributeImpl();
+    char[] content = "hello".toCharArray();
+    t.setTermBuffer(content, 0, content.length);
+    for (int i = 0; i < 2000; i++)
+    {
+      t.resizeTermBuffer(i);
+      assertTrue(i <= t.termBuffer().length);
+      assertEquals("hello", t.term());
+    }
+  }
+
+  public void testGrow() {
+    TermAttributeImpl t = new TermAttributeImpl();
+    StringBuilder buf = new StringBuilder("ab");
+    for (int i = 0; i < 20; i++)
+    {
+      char[] content = buf.toString().toCharArray();
+      t.setTermBuffer(content, 0, content.length);
+      assertEquals(buf.length(), t.termLength());
+      assertEquals(buf.toString(), t.term());
+      buf.append(buf.toString());
+    }
+    assertEquals(1048576, t.termLength());
+
+    // now as a string, first variant
+    t = new TermAttributeImpl();
+    buf = new StringBuilder("ab");
+    for (int i = 0; i < 20; i++)
+    {
+      String content = buf.toString();
+      t.setTermBuffer(content, 0, content.length());
+      assertEquals(content.length(), t.termLength());
+      assertEquals(content, t.term());
+      buf.append(content);
+    }
+    assertEquals(1048576, t.termLength());
+
+    // now as a string, second variant
+    t = new TermAttributeImpl();
+    buf = new StringBuilder("ab");
+    for (int i = 0; i < 20; i++)
+    {
+      String content = buf.toString();
+      t.setTermBuffer(content);
+      assertEquals(content.length(), t.termLength());
+      assertEquals(content, t.term());
+      buf.append(content);
+    }
+    assertEquals(1048576, t.termLength());
+
+    // Test for slow growth to a long term
+    t = new TermAttributeImpl();
+    buf = new StringBuilder("a");
+    for (int i = 0; i < 20000; i++)
+    {
+      String content = buf.toString();
+      t.setTermBuffer(content);
+      assertEquals(content.length(), t.termLength());
+      assertEquals(content, t.term());
+      buf.append("a");
+    }
+    assertEquals(20000, t.termLength());
+
+    // Test for slow growth to a long term
+    t = new TermAttributeImpl();
+    buf = new StringBuilder("a");
+    for (int i = 0; i < 20000; i++)
+    {
+      String content = buf.toString();
+      t.setTermBuffer(content);
+      assertEquals(content.length(), t.termLength());
+      assertEquals(content, t.term());
+      buf.append("a");
+    }
+    assertEquals(20000, t.termLength());
+  }
+
+  public void testToString() throws Exception {
+    char[] b = {'a', 'l', 'o', 'h', 'a'};
+    TermAttributeImpl t = new TermAttributeImpl();
+    t.setTermBuffer(b, 0, 5);
+    assertEquals("aloha", t.toString());
+
+    t.setTermBuffer("hi there");
+    assertEquals("hi there", t.toString());
+  }
+
+  public void testMixedStringArray() throws Exception {
+    TermAttributeImpl t = new TermAttributeImpl();
+    t.setTermBuffer("hello");
+    assertEquals(t.termLength(), 5);
+    assertEquals(t.term(), "hello");
+    t.setTermBuffer("hello2");
+    assertEquals(t.termLength(), 6);
+    assertEquals(t.term(), "hello2");
+    t.setTermBuffer("hello3".toCharArray(), 0, 6);
+    assertEquals(t.term(), "hello3");
+
+    // Make sure if we get the buffer and change a character
+    // that term() reflects the change
+    char[] buffer = t.termBuffer();
+    buffer[1] = 'o';
+    assertEquals(t.term(), "hollo3");
+  }
+  
+  public void testClone() throws Exception {
+    TermAttributeImpl t = new TermAttributeImpl();
+    char[] content = "hello".toCharArray();
+    t.setTermBuffer(content, 0, 5);
+    char[] buf = t.termBuffer();
+    TermAttributeImpl copy = (TermAttributeImpl) TestSimpleAttributeImpls.assertCloneIsEqual(t);
+    assertEquals(t.term(), copy.term());
+    assertNotSame(buf, copy.termBuffer());
+  }
+  
+  public void testEquals() throws Exception {
+    TermAttributeImpl t1a = new TermAttributeImpl();
+    char[] content1a = "hello".toCharArray();
+    t1a.setTermBuffer(content1a, 0, 5);
+    TermAttributeImpl t1b = new TermAttributeImpl();
+    char[] content1b = "hello".toCharArray();
+    t1b.setTermBuffer(content1b, 0, 5);
+    TermAttributeImpl t2 = new TermAttributeImpl();
+    char[] content2 = "hello2".toCharArray();
+    t2.setTermBuffer(content2, 0, 6);
+    assertTrue(t1a.equals(t1b));
+    assertFalse(t1a.equals(t2));
+    assertFalse(t2.equals(t1b));
+  }
+  
+  public void testCopyTo() throws Exception {
+    TermAttributeImpl t = new TermAttributeImpl();
+    TermAttributeImpl copy = (TermAttributeImpl) TestSimpleAttributeImpls.assertCopyIsEqual(t);
+    assertEquals("", t.term());
+    assertEquals("", copy.term());
+
+    t = new TermAttributeImpl();
+    char[] content = "hello".toCharArray();
+    t.setTermBuffer(content, 0, 5);
+    char[] buf = t.termBuffer();
+    copy = (TermAttributeImpl) TestSimpleAttributeImpls.assertCopyIsEqual(t);
+    assertEquals(t.term(), copy.term());
+    assertNotSame(buf, copy.termBuffer());
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/analysis/urls.from.random.text.with.urls.txt b/lucene/backwards/src/test/org/apache/lucene/analysis/urls.from.random.text.with.urls.txt
new file mode 100644
index 0000000..bf0d419
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/analysis/urls.from.random.text.with.urls.txt
@@ -0,0 +1,643 @@
+http://johno.jsmf.net/knowhow/ngrams/index.php?table=en-dickens-word-2gram&paragraphs=50&length=200&no-ads=on
+http://c5-3486.bisynxu.FR/aI.YnNms/
+ftp://119.220.152.185/JgJgdZ/31aW5c/viWlfQSTs5/1c8U5T/ih5rXx/YfUJ/xBW1uHrQo6.R
+sJ5PY.b5t6.pn/
+http://Z%441S6SK7y%30K34@35j.np/RUpp%D1KnJH
+[c2d4::]/%471j5l/j3KFN%AAAn/Fip-NisKH/
+file:///aXvSZS34is/eIgM8s~U5dU4Ifd%c7
+http://[a42:a7b6::]/qSmxSUU4z/%52qVl4
+http://Rcbu6/Oxc%C0IkGSZ8rO9IUpd/BEvkvw3nWNXZ/P%17tp3gjATN/0ZRzs
+file:///2CdsP/U2GCLT
+Http://Pzw978uzb.ai/yB;mt/o8hVKG/%231Y/Xb1%bb6v1fhjfdkfkBvxed?8mq~=OvF&STpJJk=ws0ZO&0DRA=
+HTTP://173.202.175.16/Md7tF6lj7r/oioJ9TpL8/x%03PjXgMMBC7C3%BDWzoVMzH
+Https://yu7v33rbt.vC6U3.XN--JXALPDLP/y%4fMSzkGFlm/wbDF4m
+M19nq.0URV4A.Me.CC/mj0kgt6hue/dRXv8YVLOw9v/CIOqb
+ftp://evzed8zvv.l2xkky.Dq85qcl1.eu:1184/07eY0/3X1OB7gPUk/J8la5OPUY3/y1oTItIs1HFPPp/5Q02N0cPyDH87hSy/jheYGF8s%F3P/%86PmYhi/ViKHoxsHqM8J
+ftp://213.7.210.47/%e5pFkj6e6Jczc/ypJGG/z%663jYR/37IxLQBPr/Ciq50EUIdueyj
+ftp://alv0e-s.88.nJ2B34.ps/s0TgnaY?yOQUt/18CY%16IzNSQu/LaT3dD?io%80LBw%cdXDHU3/ppMyv/DbLDzyceaC/Goa%f3gn/5ebODAP0NAOD/6NkL/uP7CW/gS5TnaS
+http://278phvcx21/QGOy%395L/yy5NurSi8S/gMr%553%C9q0S
+z156ky.MU/.b%daGKqc/jYZkXK1WE/Abx589H6tADH
+Ftp://x68qwf2j7k.nc/qyZfwo%8a/
+ftp://yd.ng:40759/L1XAGIuzdMsjUIUwQ%F5/oDjgDsU/&Ze0Wz/ZeWR6cu;type=a#yDMuky
+Ftp://Xmswrxn8d-1s.pe.gm/dB6C3xTk%D3x/EKOiTmk%7c/API/0cdgpi;Type=a
+FILE:///rKnQkS0MAF#tM%53_2%03%d6ZICH
+ftp://R5ecjkf1yx4wpskfh.tv0y3m90ak.0R605.se:51297/zpWcRRcG/1woSqw7ZUko/
+file:///%C5=.%8by/uuFXEaW8.%7E4/DRM%33Kh2xb8u%7FHizfLn/aoF06#7srWW%2EKoFf
+HTTP://yA2O3F.XN--0ZWM56D/qPDTt/MwMXGQq2S7JT/TJ2iCND
+file:///Gdx5CDZYW%6cnzMJ/7HJ/J%63BSZDXtS/yfWXqq6#
+http://1qvgjd1.TP/7oq5gWW/Gwqf8fxBXR4/?Br,q=ayMz0&1IO%370N7=;Sl1czc2L+5bRISfD+w&ygP3FhV%E1w36=2Rx
+ftp://5SCC6BUYP.Knf1cvlc22z9.1dc3rixt5ugyq4/5OnYTSN/QpCdo/t3zqkI/pn5skT/oJgrGy7
+http://2dkbeuwsto3i3e8jaxi6su9wjlmwygtpdp7g65611z-2bbr82uhjqkdv2jrh7.KZ/FiSvI/aaB&dPQ%42kLdM
+FTP://Hi144dz6hctql2n3uom.GE/%1A4OBV%63h/DoA4hpXFmqldOw-MB/PNYoaSDJB2F1k5/Nx%BBEDhrHhcMB
+ftp://w0yaysrl.XN--9T4B11YI5A/y4FFU%c4F0B/Dh9%D1dGK3bN/EqxueQEsX2p5/xgf4Jxr%D9q/2ubmieRM
+http://t9wa4.rjcahbc06qmyk9jkhu3f.ZA/vIwW3sc3Pg/Bwmeo6KAjkRY
+N54l6e.vu/1m2%8bMFjv/oBdy%36.eL;33/N%d21Qvm/
+http://ah-2d4.ASIA/qmp
+http://195.139.142.211/%53fk2%90Pj3/V75ySPv@K5ISv/eUiXDAYc#e0%59
+dFU69ED1EJ0MLT.G8ef3o.bn:53301/klFVsh/YInBJE/SEIzo5EIoe3
+http://[3349:5FBD::213.207.213.043]/k4PbSpylXc%92Qckx/aQfV7X0V/25RN%49ZzvavLgf/re9~I?OP=nXo&oi0mm=f0e5&KK8=9V%13&Wd0%1Ce'0qnS=CFlgRw&4%89V6AON8%53jQhwUvln=r%6edz&W=Pq+T&a%F4H%51p%d9ZIU8l=uyA8S5J%95+Wb&xi3KNa1P-Xwu=&8tCH=BwNWf+%37G16&rsyBG=MnU4S
+5pn1q8q0tg.JP/%74XuKtp%F3fqLuGO/CMeC2IRRl./
+http://bmm4qto-360l-pbemedo4.SA
+sll-9eg.W6pv.rs/WtYGg51Pt%68/R8fsX4a
+FTP://r13oym76cysnp77r5sidj8sqgxzpl3ls4xzj.JE/ta%e0PA/5Jwza65o%7D6Uno/RyO%b1B/v6C8yo5K
+http://2b4ne4.5ji.oubrfdx24.UZ/%69kMsLF
+tv2yy8dnp.tN8DIWG.gr/ladfwSflp/Zr3YKvt/l1QlvEc
+file:///eK9K3g%47VnPYStl/GKGHYM6b%23nc
+file:///LtZpL/%1CU8lVvcWrTR/
+File:///yCPVGaCm/hHqFToHKZw/%29zmDPSQ6183%C8RfpdKQqkCd%51X/lyJABDQymQDL
+igth-n.Mcw.ar/LjMApEho5gp825BK/afaST/HWKafQMBv/
+https://l89xkmwfh-hprhz.tcay299q.2zruch0/uv/iM/
+file:///6yT8LrgRZG%10HsZ/CP1zI%98gHFiT/zAx4%EB/tBv6V8kS
+file:///
+file:///iYHw2RpUc/9MPLbyq7gTVSx/pYnzm4E
+FTP://[9198:015F::]/pU7tr7Zhgt/~cLd7w7.Gb/4MvIKc6iy%58vN/AGZ08o/uT%1e7vtcZD;type=d
+ftp://0dfw3ob8y.Jri1p4f-8.NG/DpihVuu3RJ/kEKaPppvl
+http://pZRLI6.ma/wAex4MoQ/jUv6Vh%5C2
+file:///F8%A5Go9qV/UYzwol/#839W58%4D!
+ftp://zo.dz/BSI/enk1F/XjnYRqwHBAyIYdC/rTXmyPP@Smcp:/%E9r7n
+nhzbw2.qyevbi.gn/Oxbk%737lUb/OBx7/VX67/%C4fxQxvns/4fNNJ9FjR/7YeGTW/7VOLjOD4/P%89.1Forp&3/wLVBbhK/3GdjIWB
+Ftp://4ie4a.fl8g3c5.wjvan5m3j.4sawo3mof.TH/wfcrCzx8%B50W24/ZxqhiPCLDP/SZbReZ4h7
+Https://j3bhn0.elhqoer--c.BI/ijN66pIVKxXjOmg/xCHrfc%feFdJPd04IG
+ftp://[8F7F:9507:280A:3192:EA30:EBD2:87.9.102.149]:4954/AwLZnTre/8g3Vo%6doz/Uw=dU%70nxbo
+6u.vkhga15zezgvdc68uii7dh0svzopjpr3.NG/rXE/6T~KV%06Kq/iO5vG/G2S9YU
+HTTP://lZSO.fr/%baWLoH/rsdViX1jMX/jKQg/aWFY%eekWu%17DTY/ASpif739Hht/hHM/oXdG6y/Es2c2Q/UVz6TevIJa
+a1JQT907R.ou7o81.al/3Vp@VDZp%9c
+http://g746.mhi.xtzovtn01w87au9.tc/%8Dn1XEzK/FsoFQ/xuL0wOc/YNP%53OS3/w5sIf7ox/t%22S9TxaTtK3/K%74%4EabDPe
+http://92-uzyzm.pr/UwJkzP/
+http://46cda.e92kuq1029.Igb3rjaqtc.Xgpak.T50lamdm4sscw1i8mq1-8.wx6wzqxd92z68sbs43l6.JO/Q7RzRWFz2/
+[BD39::62:47.178.113.23]/U4woqa77Wyygc2/cltcO5Xw%EDWZT/%5Fd@GP5vV#wUMoflXqTOsj
+Tw95.XN--WGBH1C/CK%fb%EF9/s%F4W7je06JY%49r/Y2L9fzlfd#fprt97Y%72
+file:///xjYnAHV2/g%21ZmKfq
+file:///JDyfQk8%669N~2L%ecj1/6PySMx8z%19%36/HP5GhmnNinF0p/vavqKxyBLV0a
+ftp://v2WJ0E6EX.gw:46170/R1g73Yli4ts/K%09PIdRA/DntZ@
+pVRN-P.ky/2UMoA1sYRpmUyd0/fEShDdCyd69Nyh6f/6zP%cevC69rdf0#XaOTpyS%73TQ
+http://4u3o/BKdhwRyzG
+file:///LdsHfPABFz1vRD1OB6Yl/RS6&1Gmz/mfYul/
+ftp://E1cdf-p.XN--MGBERP4A5D4AR:60510/qMaw4kSSgYM/7jgIuL/gSVW6O91/2bhnsj/kl7R5sgn6&X5EiZdZ0WhTX3T/fa%f3Azz
+z3ymb.KM/DdnrqoBz=YtxSB
+FTP://7kgip3z.XN--HGBK6AJ7F53BBA:15983/OYEQzIA0
+nezt6awdc.lSZDSU14B1OH.4n6nkmjyyj.cc
+ftp://085.062.055.011/bopfVV/
+ftp://Mbbn8n.6ge03fiivyc7of.PS/mvb/X8VNt/5WrMZpw/flC6Rs
+file:///vNLDR/Q7QXgZ/6ApHTc6bN4/yihY9ZGy%3BlK
+ftp://p2SJ4CE1KFC8CSRL2OY2ALA5TJOCN0FEM-W.biz:51412/
+078.085.085.242/kqKkywur6Kv4Qn/-CJv6i1Nxc/
+qow6.7RF9YUV12HR9CCFTWUTQRONLAM4PN82GI8E.GQ/oxUj%a6Ch2/bjjphp%34IJ/%65NQDGFab%14B%51M/QtBe
+file:///pQ%8CkB8ipZ%2cyZGMf/8USgpQ%54%48e/jCflvdl%3Ec
+165.195.223.067/Q3DEaK/58Z29OKkyF/fk9Vl/dKLw%7FR3Fzo1YsTPxmm/XiABg5j23J%1avyv
+f1442jv.3w4cg5hy.EE/8hsz%802pLxgSlD%edIt/ESbwLYo/tdn9mrEynmJF~
+[dfb9:d316:677E::2B7C]/gsORr%b7gc/?ehIX5=GTM0co5(Dmn91JN&8J=8W7wFuQfZk7sM#vYfk~Km
+[11b2::35.78.41.76]/vVfZvUimVO/K9hfOd/4gZUL=j%09PGr#o%23LnBOkk9
+https://oL2UQ.yLN-U053DA.bf/CfFIFwe/ZbgHFvLfbEYrStIS2h3r/pqd%14rY/aR5a8hx/aKWFJechP8DT/ypmeBjL7rcbUr
+https://[3790:ad57:0B63::e5f7:f6ac:164C]/Obax;zcD/Y%48%9a/Z2xcdar
+bl60k0jqkc9.oow84o1.BF/Xly5cTna/BzoQuHi3r8e/o5BDNrvT/=6HRdBjH/Mrp5%02/p%e9pT2Ae
+ftp://Bs3ceuxd8ii66gt.X8wwdpt.BB:27095/3BfkvfzcmTS/FTffh&S/gIWvJ5Kd/AlOQ%3EnO
+http://ch43n.51rkj.rze.mq/pJjrSAiuSv/3x/EK%59ReZM9w
+zQFC1SPO96J.Jy20d8.xn--0zwm56d:863/0OWpT4dpkMURAGe/nFg/LQBUr%3E/af7dO1
+ftp://Xctk9iigg.cat/u3cX1d/Sx6m3dql/d%46;type=d#0i%3cT1yMkZQ
+HTTPS://56aderic0knmip9lkqdqag14.uk:45885/lELiK:/vF%4C5Enwqy/P5NGJ2b/dD6sg1yMV
+ftp://vlt.3g45k63viz2.tcnm3.UA:60664/AJ9iqYk%c1/uKbohn2/K%D1kequ4z8rxFpJ
+Ftp://2gifamku.jqv10es.MX/yJ0rhtMYX/Y1Wq%F90RYO1F/NT0%aeAG3/r3Act1
+7WO6F.XN--11B5BS3A9AJ6G/1L%f9G0NEu/L2lD/mQGNS9UhgCEb
+ftp://mIMU.t4d24n4lyx39.zURN708MCNGK-TJ42GLLBQRJHVENGPO.bw:59930/KmBYQKHfcjNRe/rK3fUjg%0Ad/.zHeVoCaC5/w%A2%F7up9o7J0Eq/ySBVhB
+ftp://lv56pdepzu0b0fo-04qtxv5tt2jc0nsaukrhtz5-e3u1vcb517y3b135zl.e0r1hson.dk/3TVoqjp6%1FCFSkt/006VZfho/gxrWxgDawM3Uk
+Ftp://7n977.Niyt.2fgkzfhj.q7-DJ.Ow7a.it/5zfRi3PO8/1zfKT9%421tP/?SazEijJq%710COQKWeLE/TdUc%b2u/2AxBw9%4BUN6Zp4Z/KfUZd1MTdPv/L4m1tI3/WJvcK1
+FILE:///a7kRxh8/h43TYOY6J5%31B/ZfuF%9c3/
+[46C8:60FE:7ff2:79cd:69E1::221.191.034.036]/Q2MQ8mttjsMF/UqrKq0W%E6N1#YfB7A8CHYa
+https://hnk6fx.2uxg1e9o.pm/I=LKn%a2n4/J&RntX3mUxZ/B1Q.Ilpk3Icq%7fZ/ia:4DLuk8pvsD/mpED3egQJfH/O0es5zrzwWQIC%21K1
+ftp://133.195.101.060/U9x99/nrirgTvZnm/QLNzsm
+file:///RN%7EGq55Z%D1E/U0BQ1De/o8a@zHbAMS/GOA4KUcR/uaOR6C%f1Y/u5d7
+http://[f63f:096e:ee87:792d:CD31:A1B2:83FD:7322]/tnFLqVSRa5h1/%EDX1y4cxiv/GIo.OM0/M4lBr/xgHa=
+file:///Td=wh:cuTxKx/4B8%dc%616s&sE/snROY6GQc
+ftp://1fcu78n.COOP/eDRJd%82k8FEI/7fbDLiQncgOl
+http://obp6jiork.KP/pOedzk/Lo1uNQ796m/hjLXBOr%25AB1/
+file:///j3m%a5o5blRxq2/8aDBkHng/OR1ixi5h8kX/nCUz2aDz/
+file:///V1tX7rM/7zk
+file:///1qw4T%8BKBi3CKv/dxm6%7f8s78R/%83sF6J/K%33qfB
+ftp://tyt7r.u6ier1pxipif5.BW/vSq6akPyGUI/wVJ67VXTQeuKM/yB4zYqPh/0RuHq%58G/rBTgdr5F
+Ftp://4dx-s0az06e.Su7ir.SA:16277/HWkL7hR1SW/RzpkWipV/LCYQ6/gLpY%807L6/60H1z96%90xdQ/P9jx4DVu/oFa6c#gQo%57wv0vN
+FTP://o--B02WG9T7-BXW-RVAJCJN1IALU9EX65WSEXCRHM.Aeh-m.cat:34416/3q9yW%53m/FJ9&U84ik9&e/R.l/ji0sjWb%5edu12nbNSW5c/YMGfLcesN
+HTTP://lMxNbKW@tq1imryvi.P7g5o8np1.SK/um4Z2TESWBSrcN/fNehEdgh/sW%6fCP/b2fqBsG
+http://Lgwt071.sn/HPn4x/%46zCwYZzy/wzQVoL2sT%E3Yl?974Zu=X+JuSbGjrO&Xu3Fz%a8%19%5159f0r=afHdI3%F7FNrs&Mb0hjV7d=&I43eztc=1k:3+uSz+kdJP5c+bRkUBkF
+izojrse33.9WTVFAANL2Y.ly/i3ae/5%0Br%f5yL3/MsnfAk#T6,v%51Ev
+ftp://[8714:3F6E:aa8:c8fc:4F41:b8ee:44.74.99.35]/790Ug0mWq/7yBPb/pzh4dTX
+ftp://[ACC9::DD55:A45B:7a6b:177.179.158.116]/i1q3SzWTmO%09p%A3/FWDWq8u2Q/7
+Nw2m4j4.Br9kvjf-9.3wac-fh0uk.nysyu-emjwy.cat/PGDh:oW%5F/H34QSRwe
+6f9f3nny.mq/ai%cb2SZP/qfjOd2mpEH/LUZ.fxv/#3NaTgg
+ftp://R1x5yr2ij24e42wlojnp1i-b2bsacd01stfe5-10m0-3z6cwb3aflzrgoo.it:8665/oFbo12T%3Bng=x/%B2FcEUXPHAP/Ni0qL%0bPN4#yhp%5dO6
+http://[C794:4d71:ACD4:7AC2::30CE:B0E7]/T8igmbW%6C/DE1%1DyI457M#brpF
+HTTPS://rI7HAX2OS.bsajd56xb48.FO/fn9eA4%0A/G96ogw%69SGis/1V0hqVLN6zaQC1
+http://toncwiacr.0px.g7pud.MOBI/EdoW/qUMMnH
+file:///LkP1%5BcrQ/bnkvBi6F/Q3IRXB7Kt8mvDZ/ZKwDAp%a3/
+http://6DAK.8I6FGLS.t5YJHK9GCUVU4EB6NO513HBTWAU0XP5.GL/LDO%8CDB%82p9#
+file:///%46f%c5KRhPp/skp1X/OdoS-J1foeE/5H5RIWoip
+Http://180.036.254.028/VSiroQpjS
+d54n.Agqa6.7e4.JOBS
+https://5t33av.5u7.RU/SugrkGKg/FDf6cYm5QdHk%b3z
+file:///tGHsUEMaQS/VLn1%6Au#uGnrvY
+lm.27.jv4quihwsp.mw/mwCDm0cweP/A8wSZIQcZGV/uKBboAnqevGJEQT5d
+ftp://6g4.qe-s9txq3o8vvr5e.5YWZGPDM9Q.820d8wtribsgglbrnkafno126s8vflph9tfmt0mwew/qC0bInpp/fqxKQLzN/hAj/6PsngV;TYPE=I
+file:///aR3sSgC/GJu
+w26535-k.Ut2.MS/pQP1Rx/NUKUyRSr/21x/CcgOcN4U/Jzw%C6Ft/n5Mu9X
+ftp://75.22.51.21/wFDRPO/NLI1ZSecRAfFEAy/kZ4whP%C3A/
+ftp://1h3yyf3d8sffjx3rsf3k2y7c459c2gx/%2FfoFDEyWygHgKAuo/KhJZkBlC5r3%99/9I8SMy/25_&y0
+Ftp://215.239.176.156/tNfD%09mvdOM%28zx/fc3DTw2nf/#2kySKJ
+http://Vyt.4ferfwbkbm.owtk.me/LlUtIjj/BDovC/6vJ4Wbk/ihtBt4d%acVl/ywEBIdg%3dHb/
+ftp://Lq.es/%B1ZPdTZgB2mNFW/qre92rM
+file:///IZ47ESCtX%aatQab1/V553gjR?Me/#9%68qPw
+file:///Y?GG/BBqMPBJ/nsxX3qP/8P24WdqBxH
+ftp://7vl2w.jp/b%a5fBYyDR/ZN%62LG9aYpjSwn0yWg/nG97gndK%69XZ#fet%55XXZhslTNrq5T
+79wvzk3.24dyfkxg0f4z-hsqgqqzj2p9n59el0a.XN--DEBA0AD/:8epfLrewivg%488s/2ORX8M3/B0KpeeB/2rbuCnnBF/4P6%1cU6fTGNj/o%3aZMIHdO
+Uow9.sF.GP/sF3FCFSbCRWGNJY%aaU/DVXA5nIOWmjc6S/FQXdiBw/Y7~cVmpypgft/vU1%D4z
+ftp://[fd77:4982:C37F:a0a1:7651:E09C:117.093.145.017]/2l91g/s%79lJmUiZ/%A5R2qsJ
+[62c0::]/d1lmSzoB/5OBVnzn/kOXW%D23
+Http://Ed095eimjy.rlb5698d.kp/_l5uoOO/aA494s?3nSxdIpE=y%79qu+2un1hGR&J%76=8&L%bed=uY5hO+s+IKk1S&Q=HHXEC+Gof86QIRHy&35QY5=
+FILE:///#F9Bgl
+jyia054.l814D9SNHRRA5RJCCW.kvxga.XN--0ZWM56D/sBbx24%f2Tw2/Sd0Lul0Vg1bbIqW~/lveEw
+File:///KKfIe63z/BETB.T%C6sG/RcYgnOycg
+ftp://892f7.oel50j.32.9qj1p-g7lgw.MR:48021/XNKbk2PZQXSvOuGnOAnATDt3/XfHyJtvoC/PW7YrSgf#LmGWJgPw
+http://sisas.ua/4CU60ZLK4VgY8AR89
+FTP://7qf.hlj.TN/IXOeaf/t%c52Jxwy#YkcAy2
+Ftp://Gbu5t.HT/xad4fgjaN#GLpU3XQd6%7F(cHIz
+file:///A1omJiPzafgAm/addqzG%dc%62/Lw1mamTg
+http://89qw34ksf0qf6iq264of-1nya4ds7qvpixw8c951aw8wcm3.qxk7usa.N8j1frzfgnkbi9y2.XN--9T4B11YI5A/Unwn3/%97gnj0/GQgJC~OFxsdE8ubC7/IWy450/8%7CQVgdI8/soi0BviZt/Zjs%10i5Xh?qi8t9=rBbPok,Si&*Xl=Q+fT&Hx4%D70=84+8W%18+sV2BU6xCDP%47M&Usbms=
+Z7tid0uh.eZMOI-M1.umlsyksuzovqdw6wozbd.BW/m%e684OhC/ErAhpGiG
+ftp://tw7d-6yu.im:2055/%66qbqzss/OmPGW;type=d
+FTP://zst.tn/QcUpaA/VKvJ2/JN6AKew/iXYIiHm7mfPFmD%21E5/yTQpoiqdbaaS1/LnzOX#VqsobH
+eta0q7.2r79g.AC:34736/%abp87fVdPCY/PvO8Uk4WoLF#A*HP1A
+https://w9zhko2rttzndzivll92.sbzum.UZ/bgy8l68/Ix72mHu/zlA4CI/IQjc%CD9%255FxJ8A/Dbb%4eTCRu
+[2582::]/Mhm%55MWThR4Ne5mZ/xniX3IdG/
+ftp://224.3.121.112/G1w1g%1DdRi/T6Eb_NegqJs
+ftp://tn.z-o3vn3n4.5wg7.gs/loxilPpcLnsI/topa0Ez/Na%70Dcde
+syt7m.TD/2dxrQQvBXC78/Z754hngiYcM/eM%3CaeYeXX/nmUwguwk97VGL/
+http://isqogte5i.c-3oixcmy.SY/jlPVRlTs4v/enCZWc3Sl1dJ7/M5GTSZx/Ga%cce%63cLzTJvBodJ
+bYIAYQ.9mlnx.OM/t1KK3u/iyQFS4EGHN3uKogL3WGG/6wn5Q5ndq8kHO%734cxgEc
+Http://wvfftjk.do/a0%644z/?ATzWOxO1k=%85ulHR
+http://fnoY09@bm8xcfjyfiremhz9.sr/E4Rrq2/vQjQKj9fwV6r51/mn3x8he7/W4xCQs%FBvrzb
+ftp://vxfr4g5ka.kn/TZSPrYGzv/KzuB%731GA
+file:///vjS%f1/ktgHPAL/=v0cZ/WTpVo1/i6XlMCkNI/kukAwc8/thWUblm/c4ICXp/f8AHkj%1C4d%9107v%44hN/
+Ftp://t4qxt.hd9ok.aUQ7GIMBGXP.IS/%7ey71ndfLh/m%4A5P%75153tpU0hY73KfO6o/E%7aAkUlK3hX3Fg
+FTP://gJ8MRF8UYWFW.iq/cdX7RYOqS/6E6XUh%fcdHS1%dcoDwHgpFId
+http://01s0hfwz.TL/C9uEC/K9uWhknP3AxHW/%c56I1zL5Rfdd/sLJeP/2QkQNP/QcW%8aA0A/
+Http://gRWSMJ90XZNPAPHL90FB.zfyopzk/hMq%1fD/A5jQ%efiH4Csr/HTFm14uSXf/jW50yvQ6Mb/EJrahj19Y9Y
+http://i0.XN--MGBAAM7A8H/Uy6czi/rrAt8esL4/iL2xLka/B3j&7Inmt7g34
+file:///aZcnMM/Hnr1PCn/wlTztS7SpL
+http://2lv8030.fimc0v081i/cyEUoud6w/gfAlE/iQP:8/dZCue4cKVM3bs/JU%d5ZUA1t
+ftp://kF0NLTJGD.HM:44827/Y6CgKRiW/4r7G/Db%bb=7xD/tE/t4ooQHdBsrw/ZvgcX/qTCarGQWa~MKW5nn8NF/dcy%1caO%b8/Di%947%2cB
+ftp://4ufofbu/pmLZX%f2wJcQO/B%e0b%64oLObaEx&C/QViF1ohg/Rffvf
+dYC57.CI/=G0dg
+185.224.223.157/h8BdA%FEv/KLK2f%86LS/gwA4rKKHLarf/b.EyE
+FTP://uhw3qgl0bvfp568.e5wkz1l.Dug75a1j.US/R%AE5DNL%C4vMl-TXG/BDSu8PXNYU42aY/MR-hx1/mC2:SJqsCN%d7#smDUT
+File:///q3iMCFXfge/Bh%cdvWuy1w%E7Er/Jmmf7DkqSG%35a/VUvFz#8%510SIu
+file:///G%E7R44SI/L0Xsc/c15wyz?8Bs4rN7
+FTP://eQ23LB4U9CX.vcrnx.2fa.k6rjf8b.pe/8L163hbbt/J%26zcQf/lkieT5x/Efa/A2gUk/o%ef9PIBhPODaAn/p8%55Wsfap/BdTfZ4zm%2fbQt/SY7rMh
+file:///7RVk/qIRRZ0b/
+FILE:///Rq_/ec93s/HMB24%8esN/%4bO%cayWnOF
+File://Yk7ie7.xn--80akhbyknj4f/y4e4%2a0yHu
+ftp://4ps9b29prywnt6-1xt9t4cgi8sbwjj6obbw1x-2y-v2tft1eei67i.Hk0u4zwmd7o9z.jp/o4R1sdAnw/Hu408%CB/HdQ6cFhG
+ftp://7efqt.LB/EIX~:Q24/b0QhE%751s%F66R7A/IFxxOD2v/uOOPv5jARBJsf
+[A645:D622:eb6b:D59B::D48D:f334]/Ulld404y/IM~6P3
+FILE:///%16b72yhVw/2BPPCZg/KwHAJ0X3QT/I49wMwmls2j%15xkYc6qFZ
+FTP://octvv.2je8.oJRUDE.02y4htgs.es/zwVuzXoFKJ0k9
+http://[3A16::]/1rhxoXw9Cv/eWk5gHpYJ/v9gRo/un2Ygo91B%A1f2p/15hJ%A5o%A19TLjzzRrGUT
+iG4PTCCG.3zti905z3.ci/42j5.oKj/FZmOBY
+Http://pclly.36XVKSPBC/Nja5D
+148.020.113.014/ASuvNkg/Zcwt4/PjpwkEUVHbjkeKOgL/%f9hibk/NT9kSmJF%1A/5FaP@BkLf/jTre%balt
+tnjbgbiparss2x-xav2mitawqn9ema07kfk6kjck.xC1U6J.hm/scUu%E5D/qZ9K%1CX.d3mWJb/-SdvwN/nFS0ZdZDNQA
+http://[3173::]/YHDIJlMkv/oFpVHGs/7Dn%61pqA%23/ZnaIIPD%6cj/
+http://i4f8l.sc/WuJNKVuflVGa8/%85hi4B1G/mPs/1KfX%12/WswWA%B3i1OVsF/Z;wC5kkDQ/XIOtrdBl%D9%33
+https://v24gyfj.xfrc5dy6xuz3paev4rggl3xeg3vxzw7cz98pbcgum8xlczt-n.SU/Mb=PxgWX/J04ScMxk8u/oH%A08nv/3oXR85tM/
+Ftp://c82a3i5u.tf/v%D5/%05QNNYI&ssnoF.
+file:///MaIzEiaVY/ssIPwkItF%EBIUy
+Ukg.sb/Q24uLBUl
+HTTP://Aphi-iog2t.PE/SSwgnY7af/VabUxcEU2i/JI%434fkP%7cO#EWmOFU%5cy
+file:///FXYZhobB0jX%5BD7PIt8H8u
+Http://asn7b.LA/13Qp3t0dY/Mk0ldhZyJP/rRgIZlOu/hqt1qM9NT5tAGD07T
+Http://mb2.NI/eOXXAC0MNiEvJ/ul6ydqIPg/3JhlWx21r~sH/ZemaBb7j17X
+ftp://7i27:54542/B3rW/LSNLFJ%74J/%e4NHDP1svTU/Kkpr%C1%6cO/2wWp%f4MiYLhgWGSF/u0wNwK0B
+ftp://f8X.cat/L7Gj-OSdF/QBrO%f3okEZ/L%bdvAyxC5
+ftp://[6CA9:93a1::]/?y057O5/l9C:/XsBy2so5tX=D%71me/
+file:///%33P.AyK6nB/QkN%011K/iicc3HEIE%C0/v_7Wl%fdzMCBnfC
+HTTPS://zv21qs.ekofwyy.f1pd7snnae0n2nzfdclk1sf4hybx97u17piaj5-lul89bxrf775koowj.as/BAc33xOV7
+ftp://ko%5BM@183.207.071.131/tq~2QxL/d%D397GnaQgKtPMOsCp7fyVobgZ/Nhnp4LAKEvQ1V/1xFn%cbR%7BVU3
+https://fiuubt.bc-yrorta.kdn.M8mascygepb0csr.vpifk.G-p35wx.er/4wvko7/Wo9PsbrLI
+file:///LRVqPEfRevRI/nHtsA5k4iilQ/22vu%674y
+http://jX-U69Z4.3vuws.41h3q22bzs.o3hng9:6629/Qj=CQmh9/%9aCSTfa%0aXvFQ/u0zAICPSGUx/MqP32INW%00mp?ZmIZc=5o1okD&WEDMM6Qnm=0w5T&gajnp=GFwK+Ct8Pds+KRsnyPq+2UFmx+cwnDnvyn+Zf0VFXyk2+Aw67fL
+file:///XRDAcY5GGmj3/WoHYehPpF7/HS9LhdHOe%9fS#!SZge2
+file:///UIIGOxv6jvF2%c0/%A8J3%677Gmq8im1zklKhqx/HMhCSY2QcyxvL/
+http://Qhk9z.zm/cOGBen/mBsDycEI5V7L1s%84WUj7863/p%5f~okuRD51b0M?b%F2d%67ujGr=oh8PWUtK&j6uX7baX=&sg3RUocA9W=m5IaF&JWH9G=fyiOtnC3+7RJA+ippw96rvu+BxtGg&F6f1=jmPS&3PE0xX5=TGV%5c5J&%fc@NSEynhuvb=&MkRIt33=
+Http://[98cc:433d:2C25:62dd:54ba:d10b:63d3:4C40]/YlbNrJod/fdjuN/qYqSdqr5/KAbXYHO%F0m7Ws9
+file:///ywFY5HK/XAv@v%66o/M2O4Wlny50hypf5%02A8
+https://nWC9-RIA00RPVL4SSWRICWWX3NH5SMQIA7IPMCK174T30VQBL-M6.XN--0ZWM56D/CwE%e2rWaYZmE?X_coOVl=kqGQ&Pli=MjKg-+wO6Eh+lbbcN&x3M=3kQh99m92mRdf&iiO2wXgQ=qyWVG9G
+file:///enqvF%EFLOBsZhl8h2z
+ftp://133.4.130.192/p%b1LgcONfo%bc&kmH/Ibh6Lq%DCJhnswT%1A
+ftp://1xf.ipl4f0y6c4.VA/LHuq~/p2nPbE/0YGGNJB%DEje2psef_B/aKOuMl1Q9
+ftp://o6ou6n.N8.yyld.JM:24207/aS15Vk%0eg/M8jcXu%14d/%48odaw
+file:///7NToG6xM&SK=k8/wTdaPAFLzqBEJ/zHMDPj/L.fLv57c/z8QYrsKS/CEkA5FEhQXBQi
+file:///UWrC%9111nEhh/45FHiTx%98L
+http://35.iN13LEQV.z2d.in/%B2GBtdYtQjc4TTr/gLxjU%B3c?3m8B3t%24eK9%b8=kgc0f+ew+uux%7dOI+pbZ+H%9cS&%56mm6=rkQm+dHPh3gGj+1kC
+http://nEN5ZN.EG/%0efsf4v30L
+file:///19%9947/ksd3Sq7W78%27/2K_Ylzcu2q
+r8sht9qzsc1e2wp.ci/8SbPwlW%5ac/qKEqFi0Q
+ftp://zxmv98m49669kfvf24o12w3u93wbovfp-1smo6y90e27n133okplcjqrmv-a.CD/JM5RAAY/sJdBntYWuEY4uB7hz/ozRSmFJD/#Xv22:Xvg
+6S8.Crwllo5e3.jmtz.XN--G6W251D/6InlQn/hnhu2f%ac8tX/apq%0D6o/
+file:///gVW/nnRNxPfMXKb%72Aq%4A
+file:///Fzza388TQ
+file:///
+File:///kpiE4WSatjDV/phvv7gyfb%78b
+ftp://240.154.225.198/I%39uutdECwM/PViD~qPa
+td.KM/0Dkyg/B%65DiABz/wtqGd/i7%cepV%86XkA
+077.102.005.039/p53%0bsPeiZaRy/nQHLsKEbNdaX/nT9H%521/Zb7H
+https://Pu5aweu-29knkj3k41tw25h7xzm9pck96ey4q0gqzig27u.vLPR1Q4.vg/QANLMxa/gccQ1ekkRDr/?bXRDWO=I%0ap7%f4PB8S&t%a0Uhe1I$j$=Mm
+https://J-5ytf.nmp5zuopbj1qbl1ik2c4ihjwu6-q5dhn.ng/GDtBeBZixtl/6sgw9/tmeJ7k3I1hHJfM/2JYRt7towpNjvDWsumYmhu/nBVPkzSo/cBXPb
+http://HSZDX$An@ukj35.ve/9dLg7XrzV8g/hXhzX;2/Zw3KKwTP1um2/qej3miaDjj8v
+http://sL333Q.Zci48xtb4g6.lu/sQw4ZHF/M%99%1DNl/s58%a2sCxGQ?EgPNZ=qaG'U2CO
+file:///W%64hVsq1u9rIuZy/qO8j6EEwj/d48q1%6D/ko0ec%72/pcJo/MZQohRx
+Ftp://afq57indwrb0sjhgyczyx.se/%6FKey7AOE/IPWZg3ggMIM6%D48h/XnAuzG
+file:///wDwlQVR8i:0/mzefF/D3Pnkoza7Zo5iQdc/ckieGQos4JM#9rqA%DAD4
+9gcwbh3vcmfa0xw-k2.MC/66TaJz%FE/SnDRWAknGcI
+Ftp://%cdaTNzNPNu@w6H.V9aps/87/w@rPBGa/he%FBu4vpT
+le1u.43cdu0n4.bn/Q0i6uNz/9%275%a3dAS/B%2fpPkCW
+ftp://131.173.229.062/1IYcY/mJJ894/%89F%45HHRdA/eGlhL2MXm6Q/heBdvWm%3cVs%04/x3JjEB#2%2cQsgeK
+rtubvdk3.PF/L4TR1g%5f6/Caov%FC3vK3ofrH/pz33aV%54
+urlyuqr.ar/tzJzKM/gutrfWqv/IC%24bbmSS%02P?%24JV=zrJilQ+tH%7bh&hbO7Puq8c=K1Qt&ULqdYq=
+Https://pFOROCZ9.dRDP.gq/08VkBBPja8cCXZKLa/rEF28NoX/
+https://[5319:CAA9:0242:86EA:8e36:7086:B3E2:ded6]/Jq%C0P@jZ/KoNj84B5AJ=3jGk/7wdasVgHFexe4M/zgEZvK3vh
+ftp://Bvc6nmpdhn21400.Vo53pvqm0/u7jz0O3bbFTTegZa
+l0q.0b82ck3a.SI/EQf%a6#mhJ%0dfWnfM
+http://hr58b8n.bL0/LppkKdZGYdxiHg/2VXeZWR/T4fCmyN579
+http://1x6.yc6g6uw6htmwcrb10t4kwc393g29cctmtdxxz1j.KZ/G9lcwKju/UiH4E
+7T6OSH.PF/zfYyqdxITCI0
+https://2diizsrbfh.PK/t1zBYiDPZG8Kx:/pEN4b8xKu
+HTTP://r53fl98bazbqhc19-h-r.qif.AW/8sH0%59j%FF7/QPnw69%17Og9V9l/JAn2c7i/%7Fta3x/P%08HRF/
+qvpqmoa.O-0.FI/TDl%E6x1oUoACe/4VUZdMKL8Axud/JEZEF/KOR7Q7?ifYXMx@=&iI'!tR=p&k2Tv=Behew+RFW2c+w8NOK7+?BGH&:TYW.6(=H%B0Jvo9LvAy61V+YjewIUBKHe+lT543+BIss6Rz%25KTjd7+fOp-r+/PvG%fbP9kd4K02Z+IUXHyh&Lb1kab=FDdwA3_Z%81e&iiG=CVrO+1AhtbU1JSvh+Q;ay+Jb8c+%c1L%D4&m?r%0en=8S$wF&5JOA9WI=&kGJ=WjzqGX&Bew@sXE=cl4a+2S8
+http://jykpqk6.sc/VBPT/xNRs7JVoZKE/
+FTP://2w-y60heg64rnrmpyv43tpfhftxolu-5u.lG0BKW.LY/g%7aPAj5j/qxyE/D79g5vu/
+http://Unp.IR/tN;/bCXe/fxSdK%00%CFB5N/D0L1/bjf
+[cf65:1F97:24b8:652a:FB12:D0F7:181.134.252.162]/1jXwBjjxpC/0zKR6N%0bhawVF
+ftp://090.247.102.174/YZgWR%A1NP/f6YUa8dEOoOk/a7%59Geq
+https://Zn.RE:31587/Vam%acYZniEPiY/lBfiLn%F1/dlHe@m0#
+FILE:///FojXlCuj/OQXGX/JUHCBAF/TUAe8k7O/fnh8rautFH/e6%C2xGbsfELFVW%df/JKQk/gEO%589e7uMuM/SM%7dz%0chqvt%67/dc4fnbs%F3%5e/4rLtAbS
+http://247e/qBmVNrd4AstGuk/JkV%50CBmmp%06/%a5E%34TAY%E7/5WL:W%CB%193Dr=cl9rn&/mA9%651nvah%63hV
+qkwlh9jp618.k-x.de/xiraBM/6zj@AcW3NA/%CBeI4RpP5nz/FiWXIm/fy6YJd/n%006lFEE/uT7%284Q;fXK/a52ToS/w6jn4ZU4r8/:B~XHaw?G.cE=osg8k3&iGJ=V4&w1vL=me4QRwj&YFgq=%22zCDTqgmKC
+fjrb5z774.SA/PVZsWyA3sMJrb14P%995vIm6/dC5=Hj7?cxCp=bZ(40%15pi
+ftp://pd5mz0sw.53t.sent7dh.ki/U%57Qz9g?6/6TOmiq%6F/
+Http://g3t2w4.2AB0B.3eq7q.RE/fvvJYyHjd/%34FK%98WeZ/G5Ux06F2BDF/
+http://7Z0-0PC.txi2srk55gs1venx.uy
+https://i6.kzdyaq-v3.9j78y.oq5r.gpm7oh.x1fnc78-tli.5yu2f.3hfnkcvwoms.hWRAX7TAJ.7ei.tt/Ysy-/sRl/LZa6nw8
+Iq7sp.vLK69LN.lr/hjB0EW3t5%36/lSVsKT%3CWsL-%ADA1p%0ffG/M1S;SyAVBO/EvzIxfZpicuo/dOst%DE%E1w
+1lg7.sz/X@ENk92CPk/vVYJGN%act
+ugk7-paad2cswwq3kd82lp9r7-i93galijy4x4.vatv4ag.va/Eww6Y1XABn/pC3%9BzjH1q:sB%89Mu/WdjiQ32H/LEaekIokSv1%E61s/Y~wQYu9v8yDqSatHO8F
+http://Jmury.vc-wuwj.rn0o.ug/EhXMKL%64/CwKXyRnpk
+HTTP://V7c6lvas-wtxspcp53z7o-v9dt13mpp7gc9ezt.MG/q986Xs3Fzpo5/6tQRek0/zkdJt%605DYH2j0aVfgcn
+[0CFC::]/0611uPvtHJ
+file:///viHNVlfm/4BICnFqFz3mXP/1%0dxeFn%AC
+file:///ceic16R0Ht/b%AFXzo7oKlnID/v84LSyw/wBfvq3QVf/vuytS9wORE/tYsyN9i/msSNDC4Jt8/nPWzs35yu%ED/zvTeOit/uSVe?PyD
+FTP://8GJ0QK.rQ8H0BIQZVFQQHPAWF7EVV12.LU/dLOis5Hvn/YEA%C5Z68E%50hS/Ie1Sx/
+FTP://bGCO.apov3z1nrv.ke/cM4fSVF?%ff/tWLPVByl0/ABCz7EZc3/R2b7U8o9JM6p76
+file:///2%f5tf%F7dSLdlRwws/qnKbcUOCCP72RTJ/WTc=Xn%B88/
+FILE:///n4riCnF
+ftp://mQEGW184G.Hv3zhea6.ST/iW6mhdm/G9mpZUib4loe
+file:///
+https://A0ea6aeynb4z3fsvnh4wg6h7.9bicz2zg2-695lf1uql14i2sjf6pqh1sae2j3k8iptes.57/jzHSQ%ebP5/%e3%9Chd/#VqMzFZrd%ddpe
+6wmlp3ipb.cqi.ikf9wdku.arpa/dMq4GciIqW/aL%10jc%d5d%c4v
+file:///lT?KC#nXl!iMB3hl
+FTP://P9yyxqsh1rz2q-r7gp.h0W9VBZWGP.tk/gvbKQnzs/q1Gb
+file:///7KTju7/x2t7Qen83hFitH
+iawuqq99.AX/;aTO9WOuOPwl/UAbRoxCcv4
+http://h-juvh.3gtf/spUbB%2aq/#%9C2/LWN&
+vj021lv-xpcrzcaibfgk0.ad/dVYoNrxc5/NVH90Y7CCv%4E/vITM8z%C4?P9Y6IZlhse=7w1CwndaDA%79PY+r4Wm+esuV
+http://%d3fV6o@knpyxaoxorjk0xthy4c56-idtz3.i91eof5.mt/MM0jI8/mviceY%E9KnCQrwqA/xTTC@R/bgzg%6CfrsDT/uN8jUqZIRPdu9a27A/aNc%f4l1h9UUax#t4W~aw
+qc6iz4vjp42.9IZ.l87y.4m79dnm6i.tqhva6e.dumzoy.GG/aNgCtk310/ltjBeHJh5uJx/XMIgU=CSzwD3D/
+http://p7E5E0.hhvqt56.ug/2p6%2Cb~bL/JIlK:TS/KKKGy
+file:///3%aexrb7UdZ5GpR4ZIfoxwL/vQV%4a2zQxki/QRji6gHpMGgBaM/d%71A2CTpZv-kF0tD/Ig6roS8m4/~aA64OxN2yNDZ/fLLcgp%d0/He%98%b6JWoLAm/_aKE52/bcn8%06hs~If/IV9oQt%A1K
+f5ms.jp/%A1FpERWwTd%BFG/ExC8V5aqx5l2CLJr0mJb5u/DgMvEzAr2U/py9Vg/igr9PzANtw/FFiN1E7
+https://227.086.128.010:64985/MDKuFInA86qto5/_cK=4S%49Ic/SPp76/TlV%0Arlwfx/
+Ftp://171.160.94.43/ALTgS46I4VM/55PbbK/5N%faTSE
+Ftp://3zd7z.etw.XN--JXALPDLP/4UztCuTbW2z/LL%2cDI/dTYSi9
+t6xfr.wxjz5p2t5.zl8m4.MN/2cbpjk/gsdm/5Mvc-j3rc/16Wb65&c7x
+ftp://D02-auxxaeqnv9ve-jlmo3.l10vqu.12jl.2mvjwrsqm.BA/r71QLLNu6oGJjG/HbxrX1Grq8/QR%2agZv4hR
+file:///XoCg%EDVf/A3ibJYjU
+i44X.a8H-WP.zgmnrjxq.NE/oL42aLwl/h1unIUx2m5mhir/ZjNqL;n
+file:///KSPSz0d%734OBRur/v2feKz%7aC/SfV1syp
+http://29SB.j6/ojVDhx/%A7e34T8%01L%41BNV?6uRxM%DFd=qg9jmHtW5R&EeR=%f9,mnV.cGVNclEM54f+efsLBpEc+3V7mIJi+Dng2-Qk9&t=VWC!+5gUmI&c4c0sX%51=%03?a3mDKm+4rHPsfb%dc
+96.79.198.95/8JJUovS/
+file:///.LxM7EsLzp%d2/sOKzUh/IVX5Mw-PVormR
+5r.uL9CQEBDLX.bn/?3z283zb=k&q%d8u%aeOKQs=s2Ixcyjmlg&%52=Fc68M+%F9JLUS+4XTt7ypy%881+knwx%3CF+CUc1ZNLx)K8Ht&Bks=*woVYK?GE&vv=P+b+W%134Flc6+%2e2w5%cfPu%5BXUS+PAAvb+@e/E
+http://ol7ctcj1x.Ugk.na/jnDQG9WhW/r1cIpcqfGNMDWto0/DfPQlP
+ftp://ico390kww0.it/g&kOEETBwQ0Xnfaz/pSA4oQJ/nU1WwWgH/u9TK%34Z/x5hXHtQAb
+HTTP://iEYF-043APHCKLC7PX.qB28RKI5NNRTNJJ41MVKDI53GHXIMLM.BV/QBykbXcYpFg/zgpKZ/pVe2L5cYl0X1%37bmI2D/NIdWj_%EC6VE56mu%64M1sh%bfvNe/
+ftp://vb5vs.P5f5jmxq.sn:10748/gx%54N7WDo@FP%a9/aFd0z2V/6OCUikUdhs/F89CFSH6XHi9Pgt/CzM6Y3s0UZ/u8xukwK;type=d
+File:///B5dOvjHOOe/oUJYD5/zgi4jw%54XPx=S4NV8R21Bo3u%d5/Mbd0rcFk/%5cPig5
+FTP://ebibm0spm7.cat/aalird/1v6GldpVgXA/9akBrbVRE/FbH97%67/YfhOfgG/gPiGQb%D6?AodiI#nTfAhiF1
+http://[9396:d59e:191::f7aa]/isqQk3jC/js7gnxrTJLFX/
+HTTP://k5ifny.sa:32595/8XvVVW6Tp37x/IF0IkevEa9jqkw/58g3p/MZB%94sVPjmF7/wZD0BUp?N6P1o=nH:%5840TZNN%37eJ+AJXoM5t7+UhR&%3FCC(O96dC=e2Zqj-YxOMwv
+2hr.p5v.6aqidmeffi.flfqfx2znf.cup605.v6ktei.mi6.AQ/ky~LSgBJ/3JZhLix/blFeDQRn
+gtf7abvdn9i7cr2e.YE/-1vj3Mw/P%CEXiCFd2a9/vm
+http://3rsqw6jt.cv/n5e9YJBevO5c%6e4rW%a8/iKy-raSDu/.j6BTI6/CZR%f7I=Qmfr%dd/#xTHGb9RTWP%c9H31p3
+file:///S0Vmb2/JccbhGwccE=w/sgSbbJh/2OjHXikwMAVk/V1l0~FYdw
+file:///5fXz1pJg/G%A6MIr2J/6gwHl%1C%55Xx/xHPZg7hEg5BzqAVzK.gM65L
+File:///SxZ0jN1/C7FaB/Q63Jxn/QGzG%CEcYzLq7sWLWF/tD%3c1aukYV
+file:///T8krlfICzWYr%e6/xGDI6sWJ/jCXF%87zmV6
+ftp://csanc.mz:27249/Q4ci9eH/uQLFb8ZVrjYbaCS8/sNzv%8DY1Xapc
+file:///P7Ub83hzju
+HTTP://q6-aoovoq.j-joev5ivayrom1t474xlqxrfro.xn--wgbh1c/WiS76Kh&O/IDDo916%22Vp4/iZYdp?%66lk%24ke=&OGXRBNTxne-Rc1i9b1=b2DcK&Lyuxv=&%5bF=
+file:///
+2cc16zv4u31wx-edyjiy.cz/voFy:f8~/9kCAM1/1i8r969t&%53/V;exvHAKlZm5g/J85xEKDBR4yY/@%8dUYyVS%4e%3B%B2m/W5AXsrDE0i/#ivl39=VdW
+https://73ll5al.MO:10068/5K%AAf0p/#5deD$x1
+FILE:///a0esBQEE/
+qnta8.f9284.5pvu.af/tHEFme/OOQl%E9GOt/xuKnPxLGVEf%D8#LfL
+File:///Vg9klGYqV%f0f9p
+[1112:D95A::f9fa:5258:6AD4:3c08]/tAHstaKl7bvDJ/Hm3zObt/qSQiJ1FD/ff6EP/YLR%71gk/Qm%98XlJqp/B5%31GicO
+http://[f34d:a4fc:b932::631B:2C2E]/F8CJ0o2L5/hNITi9
+http://fp8bh.zm/R5WFY9BBHOmi3/OyhE6XN/7tZGprtgW#hrKj
+mAIE.mXK.qq.3WVWRXC8BASM2NX8GRC-L7O.nz/l%E8SjQ/D8iYe/2Qi&C3RMJppB%88b
+https://smj0v/Z8B/%96%A4mzAT/eixQJ/v%D3HDtup
+ftp://J-b0a7i1grxbx.gt/MuPMg3Ly/r2iyJo4R4opO1Xj%C6
+vbhx1cl9dgl-asht.lDN0ESMI.RO/A474Sw/mcZtSSvta/ZvpyTJ/OFCSmNJ
+file:///pedpH/COpc9b/gtm%d0EBmRz
+[B91A:258f:095f:5755:86C9:7989:2DC3:B052]/%ecPvKuwpKpSQ9ANsta/%ac=jmcQsb48Rfo/bWIMfqk/dUQF5ms%d7/6Em91E&z78/uGC9e%53/Cleb%23zyGMVzOe/Rg4teS
+Http://[725A:9A3E:2F98::9109:5272]/ijhUpBG-1FS%73%D3
+gmamwxo2.0z8rwjft28enmc.p-5uyn.u6E6AXVBP.ph/gBkpM4WFysjoV/X591ak/tIRMD.t5y766HT%5EX/RSb0a/Nw
+https://mxfwd.gg/uwsX4/vnVUhsd/igwlpT%bahLI4;P0
+https://9g5pjef-db.Mq0tfjbmqomp84hi.rf97xmi3834.403gi.TC/sLVqu3UG4/OYh%98SQXVXf7Cp/j%deBNpZoEfAD60RV?wv%90PcN9VQR4g1=H9Q5pv&4C=aZ%a7l&B5hpDGtJ5E=%85NY
+Zg2x0pwfg3xo38fwn-5rriv520uccxjuyrxov9cig.fcr1xxh8.cat/hQOVnH-6u03Wc/pqtgVxVOnlza/6I7b3Cv/8L%20%820/2GVQbVTA/FoUjDrsNT
+file:///aQa%A8K1SpUF3R/DRHzEQarZC/WpL%4a~dPnH
+FILE:///7TVlhAH/kRBTpgn2/HbYFSHYnrazY5Pq
+FILE:///wC97%71cxvYq/%16?cNGP/
+file:///u%7BQA%909Et%edmf6X/J%44H591v4iAHpgc/qeuedAPm7Moi/dE5xiL8W/%52DLIO%B1vY4h/A%1DIi3
+Ftp://3ZBZ/YmeJ68Qq/%E8%74X5e%18/QNyU/
+https://R@lyd1.xtccruqswon.GR/oHPO%79jfl1/rFfct/TI4I5pfjn
+file://Rcpx7se8pzp4sj8ooxrlfyi.cpj--z.tl/ZQtA5b0%8F%665G/RTr%2BytU/4C.hmyu8/F1hcJ/PiHi4c%16VEN/66dIi
+ftp://wDIXDXTT.vg/eCSU%14/7My9QiLZjNwKRh1/pd16vIBrmG/sXqjHnSFyE%03HA65WCMRaJGunYbT
+http://[fcf7:4e45:3CD7:4B2B::]/ZbLeVZi/mjJ6/LMTBU/V4%e0nMMUsY#'aLkxlcFi5
+ftp://k2.jALPBG.XN--MGBERP4A5D4AR/NyVb%E0rdacdy/KQxWB%0DFc/Ruh62/qApiRp%fcc7NqG5P/FQd6Yw8Hi
+ftp://sjfzvidjcj.ae:55965/r7feW9uA/33qU0/BKlBWEwBw/w3nSd
+ftp://2k5.lfssxj9iatcd3056j-rq0/Bq8-ZY8byN/Skg1r%290%40%23/X51QAJ7U/H7Ir4nHaQ8?QOW
+http://ip0176.JM/LthE/E04n2pcGJV?P8=dCpb%e3q
+ftp://072.017.130.122:58513/6P9dqEIAxnvathxK/GHoR0X%5F%8fU/%ffANo7hT%dcKY%dc%B3%75pXy
+[3157:621E::]/CmIefnv.v91v/I%E6OmZLafDS/a7JoSqx80BC9/iSPk18UXH/g6xdyYNSlT8/o34wEX?MLP%993E=%1Fao&nRDo=6svN8+d%4Bq%30jky%75psOKb+h
+FTP://zbtd.0doxocs/sDrr5d5i/%6cJnyS/5K8mb;TYPE=D
+http://1vkic.cmd-efq.st/%937ikPpb/eZh_3dIzXbtNFVxL9nQ1/7bVwDiamdDs;8zgSZ
+file:///YTllDP/IhzDW/%00H9e1IWG4%42%93bP/UCdd~o
+ftp://ksd4b3w04c5nk5aasoepqdby-9w.sl/pNe8wJ2LkrJZ/XJSanvU/
+http://oPYQ.nd-egq1mkgtuwt4ei1ax.GQ/JRpv
+ftp://171.235.253.31/gop3Q%bcUoW1/38aPN?
+File:///XoULHUnTn/zYp/#SlAGu
+0kx1j6uf.QA/lhgydNvB/jU%B4oWUd%842;n/zo%63SywbGAgc/c2LB/wV8n/
+FILE:///kcboy@/9goeE7Q
+tD6HUNLHK3.u-06.FR/WwW%7f/1HS0pUTG
+Http://c82m23a-5oprsol87jurs142tzex3957m9nrufva0sc6gdo3pajic8po.H5m3wt.1RU:11878/Odij%A65n/Am~mzHC/#ArdWk8
+Http://cd1.es/w~Uc%455aE_/wVJKfr0/X3vnA/ImG6Z
+http://5ect9i8665yca.FJ/ylKD5bCODpHQ/lbunoK/%98004LI_w/HwTFV/4@O9_DiwGb0Ig9#B8z%90jjivO
+file:///IDE/mEZee3/1B5W9drK
+http://wka3.GM/%95yhyVy9#FFld%0CZGoiP
+file:///nAL4tAgn/UK?mpt4IE/.2JW4Ej%28uiG/LulMqnbE5
+ftp://973k1fnytm6y9hx87p42k.1whc75.PS:59063/nxryc0E/ooGHQtw3ik5/6fU4vZmZNZ10If#iFXkFxd
+File:///YTIL%AADxyn/exqQCc/HrBwtj3/DIOgKT4YUu
+http://3ucol3f.lr77xtr.LK/FNsRpDDW=/76bEzBTI/q30mQZ/
+9sb.7mct69t.ar/WpXcM8498S4F#k@L:'L
+ftp://3qn.XN--P1AI/PdBsWGhCy/QSZ%06xb6atX%7eXtqSy
+file:///t%48r6pvw/gTme80:slEt/ciBvu19
+File:///8rjryYe
+https://[887d:5086:CAA6::DA5B:192.032.127.177]/
+File:///v%2CCgt3%32kh5ZJx/~kf8WDLeR3XmmY6ap/.DEZNJ-ylM
+file:///KNINXVO67tBU/VWJdbMVH%a7uqRO9%ad/55Wlt5O41e?/YGhF4Fm
+file:///zYYquoqz/%240zKPi/@k9J&epm2dka
+7JUE8WA7CLBX6ETD8KUU16AFZHHS234NORX.tep69aqao2.int/iZjrUNXtQfBaF/Z%A87tU/XfvTnCVEY%00/FUyeI05%f4#?hZ
+file:///1?Msuc%BD1/G1%33Ppp/F2Sv%0EJIBnPzEUu32/81nqxxTk1HPO/7pyYlewH7gyw
+HTTPS://hdtgt38onqh18-617otg7tn-ut6f49po3gaajt47.m4O26.rwko060q21o.Am497x0kow-u.TN/nZX955o/JtBhKlvv3r
+ftp://28.118.125.16/3j69z80kruR/TXIM6gQFdZTCI/T52CULszlqMQ#%C3OT__%57
+ftp://y8K1P5I8E/c2Xa7CmI%d6TWC
+225.022.162.113/ZF58s/%CE%56BA5rQPOLU/AUNP8rG/w8SHG%d0FVsZX8dC
+X6eygmy.1a-mtt.ki/WC9%a6/GH9mNozOi
+94h6rdisa-eh.CH:8242/I8Ik5%42881r/EsVYPHYT/Jw7%3A2%2778ggZ8u%60
+Http://89.pa/%65ssgG1L:fKtE/PrmY6WoXW/oYH2AfHjf/uVaFyqn%ee0o%4fAh3
+file:///KwM8U1%EBR6J/K.asJbs0/i1vCxd/ZthOZxt0IKQEH/#x:Q8vtaIw
+http://rP6.Ewrowee5k83.COM/5CId/KVp%FE
+ftp://l8AAQ4XL0X0HO6MF7.9d.tw/%98Vb%117Uy4/KyUMl9
+Q293qtnuw.vi/6fi1J47ebQ/d2EC4A5OM%FF9_tUNs/dk=?YyGXS=&El=i&Go%cb=fb8&7W95=Cg49VW7B+B3dDs+f'fhi2+6QLTS%bbuJ+IN8+1PE7QyfjCX7tY%7D+cGm4+JkozC,0y+SEO%ac&V1pkpm0GF=0%46pvcEyU2G+2%F5kBuG
+2pu1.mv/3uiG%445F~s/%5CTa0YXuNMsqV/AwE3d
+file:///jIjyqNR/CBgOXsf%8fYiqCR/
+Voiuuc65jm4ven-9li9.mii5.0h5xt6.KE/qachnQB/nsC%4ai/juYvC3yTiCp%06S8I/LLVvQY#p1jmTyx@W
+Ftp://ydhhq20m.MY/%ADNIfcLl66t1fl/v4%a60h/N6My%9AKXUvToMFxY/
+14.21M1I.NU/iqlGVazIWPCvV/oelkORYd3Iwsdy%0D/LcdN7U
+file:///
+https://07zje.j84g-9lx-673h.vwr.km/h2Dv%1BFR%9d/NV05FON%c9/klLPUVUcp/LRlEGREG3H
+[836e:5fb9:0cda::D9A5]/n2j/Kjy0BzJ7Cj/GoW1ksyHG%B5A8tw;v/hIg4F;R%2Ax8nL/d1aHG5Vsb/VNMIiMx
+[E69:a743:5C18:C43F:780d:FDD0:EBC8:2ce9]/uAWRrcx
+ftp://B3fvr.l5GW6REKV.GI/0qT%dbwWVXZ/3kdb0/kBQuFu/R@9WXH0
+Ftp://a4gdplaw.TP/zyf2c37ZfY/QaiwZ3l/CUi9.ado/
+8L.vg/LjRJZ/z7/Fkg9dwmTDSp
+T7wos.u6I.cJP-5HQQCA.9dutej.SG/6McEZ0
+jJ0D1X6C5CCNWYGOCI4NNFC5A5NYJZTCW65DHS.d1yxpq.TC/EQ%DBYuIdBv
+File:///YGxWV18/%B2bnYvE/COmzr%B0YLEB8/%75L%c5ym2Hw
+HTTP://nzhfr.Mlrs1k026k.KN/~bhI#qqgVS5YR
+https://z9z6ip.INT/1%1dXkN1P/KI52I/yo%FD13SoZz0?:z'X3xwoS=1y&lmDOOEVzwHn2j=xfbMj%67cy#bKedfyI1
+FTP://aysc5.8i8kj7.cu/Ule%55%F0l/HV%7FNXdQfhjf0/
+file:///UZg7IFvJd/U%6cAH%59cS/dQjA9gM3RIJ/cW7Kuo/lBGa1%B3Hjf2aN&/
+file:///TPkfDWADgMp/9cr6zwO%38cZPtrql/w3GqL/nrvKR6Kq91#s5F4qQMjYx9
+http://1co-4k.zzzqb.XN--KGBECHTV/WRGpnKFny/eBiU%BDapp/0cb5bJ5%24J8a#N*cE%e4BmH3Jse?2
+n7q2q9b.3-ve593.eb368oe.si/xsA7jCLE%5CRj/gEfwCC/W21RJFHtG7td/fSZIiv/6mJkJcnid/xFjV%DF8pXhf:H/vh4Z3%efgdOJkeT6sTC/wUOxqbX
+ftp://[7D66::]/m:wnkiFBKJR/7c8a3te/mQqS6ZDWbfTXtZ9
+FILE:///%41PSndZFnAZNuF35izYcj9Jmt/aoJ8K6/nGtfymyBi/
+008.245.185.106/0Aq3gb85/6TZk7/PVTk%b1G80
+ftp://90.188.10.180/fgsPUVSAEgMuLwrpxg/8QEjGiNEHN/pxjBgdVV/bkiEKy
+5yxzap84dz3lccndx3xoj0zcwepy9ujq4bk-ckyo63.si/%E89rzFXG/htVDvVdD11S/SLLVce1/%5bgcDSkD
+file:///Mr
+dm83f2l.vvlpnpob.7si.cr/RFT%18uMgARxsP/8%61%7cO/eZtPUg%e5FavR0XRe9wZZ?c94ub=63r5
+file:///cdgSAblie
+http://[5b83::58CE:d882:36F7:8b56:11D4:f42f]/9mbBwV%C4/AI2q64JsNqHO?tZ3=nATs%3CQ&lbSzuIb=/IJtfPRbcu
+ftp://gOD0KB6HB8JDGK56.l-V4OW.sj/KqqiLzCu%6a3jexLbLB/%6dBHZb%29z72YF/
+http://s65E1E.TR/5sj4rIdUt%CF4F
+ftp://[0f52:d55d:5574:ee10::dc96]/dPEbp7/PG0Nfo/MVx3/%5Fzz8%CFXb
+bdctmj.vzaax2fe.j8S2.ojfq-b1m454.g7I.uy/o0%28WV/Bv9nDwD
+https://k233JLHW6N.cCA13HZAXR.laiu78y.fleptcf.brva6c.osod.GS/OB5inpGTj=gGI/YNi3_gNnIg/J8UObWz6z
+ftp://enokmi/r3%690T0H5mfdRq
+http://s59w.cg/nJoM7yv/Z2T9Xof0hNGhl/N0%6b5Sbrbtjj/
+ftp://qytw0h.hkdt2rm.gd/3a1WJDglP%cfZ
+Q-2pgsvifg.yr2ix-c4avrjwva.kn/_zD8ad/%8AVwQwOG/JMC314h/rO0qj%88?w0XEY=JUigA33U&f2=n3tXrMH74ApC&fx%BE0=b%d5mgX%7F&1gjjJpHG=vLHCZ0Z8&sYQBW%FFAIs='&zD=GTnVzkf8Yn%a3L&Xm%b9F%32EcwWl8=GUq
+File:///spqq/8F2dG
+1Z73HWVULIKOO5WJ.rEJGR9.nsscy.gf/rHEt;i5T/%50ZjYYJ3M%4dR/WlW0C48ocnb/NRA~0M#
+078.104.235.053/8KqfxznOtxC/ycYiTG3%11zP2%A1/hhbuX9Z%d403wES6/P0gg5%94
+FTP://58vs5.g0.tHI.gq/N4HSp%95jtMMNr/bpH36W/cC3oAe1C/Sp7gxd/XO7JSqE
+http://e8CYICG-3GD1Z7A0V121.Ya0j.Wy.CM/BLyz1kmpRF/nb6u%52/GpXGTv19#9?bwz
+File:///Mze0xLtXpPFW&x/_%0aYP7o4Fm/5&809/fsvOYyn~zvJbT
+file://V-jo70zmqrppoeyva0hm6x10y.UK/#3O9f0OYdx
+file:///K4BV8xTq%ccORyFI/8PzAVSZeBNFX%adT
+071.247.240.193/%94VOUi%ac
+27r2mghslc2b.Dwbpiqi8q.gTYSL3Z.am/RU80/KFcctLv/R8tG8d51EaD&pno5r7pDR#GWY
+mdfr2j.1FZFG4.VN/Xn6l%6dLWufM/I4FHTzlnWx%7BoI/ueeKx%03mfSA/%9a3PMEt.iSdeTVFgSnLi%C84m/6dh
+http://H4jk06c6mtprgjywnc40mjri05a.VA/7B%C0h%4fCjj80/TrN5HugANCZu/eMVdn4en/QUSLGhe?7yjqzvzv2r%b0I=&p%C32*HvmS%39g=wb8u&lTvA=FCGNF46U+?Ak.vpCAV%ceiK0f
+file:///cVjI9Ue/siOD/jynyp9%3FmBx
+http://u8ic-x8o.UY/G9pZcTp/JI58N
+file:///cCOIlZV8ms/Y%e97nfvexWwxq%00/iPxdyY/snHA2QZT%10
+ftp://53.151.134.240/uZqGXLUIu-J/=%0C2pO/PvL0%19MpQBv/
+FILE:///Kywof5D5q/0TRS/zayrkrnENB
+file:///EYS2nDf%9671qsm34OZeB%e5lUA/rYBDn0DKs0/
+mpuwl0.BA/MkvAvc?j%11K4=9gE%613&qOOEP0t=g7EXs
+g6tylc0.daeczh.4q.XN--9T4B11YI5A/1SbCR9cX1%3D/YfP8CpLKn5KzTL8/Kj11z%B7OuqJU;qM4P
+file:///TJa%86AczeCmM5QMhi/Wox~Ajl/WxUF%5eSA:y%0fD%E21/x%cca%d3Qgx/8iWJ5-h%26/fCK%01nQNrK8#ygTTB
+file:///~%303cUUVYTEaQU5%5DXbogiPKb/favR2rETEh/9TXM%15u/nYCOZpZgL
+file:///mJM%a1/jv5%53QDqE/bFMu0CBp
+[a0e6::]/YR5lwpHlG5BPjr2XT/Pq%e4kWAmZ/ucI10P1
+File:///8YorWt/#ToazT-v
+http://2igfcm3qy.wlcgdxv-xat059qnx15a7qp-p-p5oph1c8.GP/hS4Aqy7SmODbaOH
+3s81j.TJ/pS9Jzw8:NWryq/%00Kh1/Y7Rfoo7haw?pYq7Efg=
+HTTP://k59s6i5o.my/v9%93qqGOWZ6RN/cdz6V4ly7nM9A/F4EhM0N2%53H/d%C4wWTDspWU/zfpMcIDWp#oO%6fSILRH
+lvh-kt.TN/xZghTR/yDiD0a/P5D2%37rFa?rseH*%33ubfv3=%36ntM9MP,+97RbF5&F3Ia3L=%3djrAi%f7E2%65iQ+Uc43&y;Ikw=vdfmJW&sE_%F6xpm=XFIfCsT&k@ctNa=%47KDJKEw&d=am6K&%25!BjLNa=iqs.l
+http://Lhe7w4f06qt8tif2af1k6s552hlbk.mfce.cc/DEqiQf/GLpkeKZAxhSO4m
+Zy-iit.Cth-tuvx4.au/dl6DMUqP/wAeKXt6
+File:///35GJ%C8m6ubg/kpI4iEEx
+dbe.gkg.EDU/cJ%fbQ3k7pwp5/arlH%DCD
+Ftp://e8ni0.5etxvrjvn491/tP8r:UC/faEdqs4P/v4zJax4
+https://4PI.gg/fFtQoVp/b6Jf55/YEc2l7dE%CA
+http://gpu16lz.LS/9e%daJrwQfHEpFvsZ3jx/c4STIJ/CmvEGAUx9f/
+file://ij9anjtok86ro.uN-BGDQ855IB.sDXAQR.5kr8kz.3J3M8XRM.18r3s0g-6.4rjsmwue0lwao0og17d-5-1.F1h3qgkul29yw2t4p4se5clomncxhmoy.g6c9tbz7.pa/5LMtmbl/1tfIF/pBOV7Hc
+HTTPS://bF2RA.kw/1TA9pTTBg/nM/VSRo%85Kt?%62mxNfo=HDowgwkM3&9oPOLH2=yKOxIe+YNtt
+5.Piba4ac.JE/55M1H/AZXdj
+m-k6-ej7x.XN--HLCJ6AYA9ESC7A/suVrNQSIj9/TmRhHbe/o&0dbqR/
+ftp://242.228.138.8/o%CC_QjILS%17aYH/%caw8CcVZyPRZ/
+hGE9YH3D6.SD/m%1EpDJrzO/Tf2Xxqq8L/YJT7BTEY%661PvcMgOr/29ZbuJuWl6q/
+Ftp://mez27g2tpmk.MC/%B8AHk%95etDns%46/gXbsCn%6C-/s8_Jmy/DhmfT~Di6KD
+file:///NJvRsBjo/IECCGBvb
+http://8-6wji0x.tCVT41X.k1PS.15p.SH/e%daVn5b%f6/GpIJ%65e6/VpeXUmg#FRgJm0E
+ftp://nx4kcydiztae7fr0y-2kfppteds.gq06u.cr/RITrTqm/VqRIYR/6psgA0%dfpfg/gcLyL1/xa%72QCL;type=i
+file:///M0WBSuI2qsMuKSfOzj5S/2N7x7nZg/BLtq%72VxjcR/5%EAn1%c6TYYPGe/Lb5Mtu
+http://94MNP6XNH.0mgqklz3t9g2xl89x81-a3hifmff89nahy62jeyhuhe8lhkuafizl.GQ/Ajpa4Z1D0o/aVv748s/NAIWCkWCD2hj/7MZS5c79DmL4/ieQ%21gw?oEPqIN=Pm9nPx54%c1&j1y=C
+ftp://rKI.COOP/v0pdu1zj/ir2UM4X/7k04jhOKPVN/7ua%E5y8p/bl~yS
+d-IJA.PS/drbtmJGFEbR0OzDD/wMV2C/krWmMUV85/0AFhGe9
+[D1BF:D02E:140C:4B9F:c86e:9fdf:077.173.119.180]/A07Ox%86Oae/yhjXUMut
+http://A.bi/J1GPah/OT741dJ/Jh3Z0xb3
+ftp://6VMV.t680F6.ijsru3.bm/vlJmkK/go28Jr/qUtmHmqhj/ykeAVxYoe
+HTTPS://oi%32Yp.@a4mk0.Teyu0lojs62d8l96qiym2v477ixatleasrgft4ttpbfel9r.BW
+x37MULG.514yrp5.Vrd68eeufzt.VA/fFMWutSw0d/Gr%BFun3/JH6%DESQV8f#gn+NM2
+http://2.88.82.235/6bhV%BFGDy%ABd/g84ly25/;4AeID#
+https://a860jcplfoodo0yq401cdf9.1ZE2P/NLArIzMZ%8B/6UiHWMMGS79/?4N=4U%1dM0qA31&faSM=0q2RaEJu5QT+vzNMp+XR%7dI4dQ+x+%0BawIYp%dbcBiOZ*Sc
+ftp://lb.NP:46239/xwyAL/m74%9fqj4gttFLg/
+s086j1-9.Nowi9s.fm/16zr3s/mvzfyWbB5/&1mzA:X-3
+eigz5dhw.jynsrju0t044lcc.3c3bfm.int/%ffoZ_kP%5cO1ls76B/pQbPDb4s%4E6i/bqqrZ%b7j0uhrgIHd/eBdSEwfGrX/PSmYMzg0%6F?Qr%92y11b3=&L;5CV=zJao%31Tmm
+65-ihklk4j6m.f3CFA.7kj.qa9rcww7uefzkpxbf87ni28b4a1i9rjqy9a.5texnqlc9.cu/p%CDK%b1%449LH/IiLqpww/HmACJI/r46TA4
+133.38.197.20/pbgvKM6W%BCEBN/Cvcu0&#idQDycc
+https://4I2GL/cGtyrs/%A8m5%3fekPsTRWlB2?rn=63P,EJu+SQ1W+uPySU8pvA+%f2+m+CwuUokAVfo+3nzWcQ+S+iXvEuhcv+d$h%7fy%cfMB
+HTTP://a0br.o0gvxf.kp/zZkWq5hfxy/q0x-g0In#bd%1anKx27
+ftp://[1327::117.246.244.220]/%91y4%09/
+ktefq.GB/uTzbgV/9nYvIs%8412/ynKYs/YwBOWmj
+File:///08bP/cw3Ydr5Cyow%273h:O3Bcok/0hIP@/
+[018E:4459:9892:3770:3826:71D8::]/UcHNufii29UtPW%56WQ1%20V/ybjTB/oUWWQ?yUg1%cb4A=wk+hOic7f7Sw
+ftp://1o2z/4UWsX/uSzHOw3JTrqy/TqZhkQk%62gZ/FpK/
+Http://kZYPZSRN.1m.UA/QN9n3Nw8kPAgkCB/SzdVcxryKou7mMG#p6at77
+http://se9g.s7-5qnlmsi0npbr8ouxuey3y66swspkl.y4.st/xfP7%066uXWuOu/clIFhy
+ftp://D4j9grnngs4a61b.im/f35gw%53rTeI5/#Ff7A0YMs9RG8t
+https://zujspr.cr/zy14P7FG3/Oxznfe/P2zpT%38S%FFVfP95Lh/nJJgzX/kcVuHCzV?Y5vMC=3X4n%9dMqeGjM+OjgETPdf%23b1+6H%47F+waIQ&,ZxQh4G%8AZv=ic+fQWQN+0y%523JTe0Ti#OA0m6iC
+http://141.171.118.17/VLnEb4Y
+https://sla.aowts.MQ/KbP3AV@wXFSgz/TauvS9f2/zvGpvN.e8a2Kw1ho?jYRUP=L_IAzw&cj0ux=xz&lrA%8bS56%A9=SX7NjQ
+file:///
+FTP://h6.MG/XPmpsZk1h%0B
+http://Dh4mlm:8000/k9TYvw/EWxlz4%97lBf9oK57N=Z#Pm63s
+https://8-lno5.KM/Uco2E%dbYPx~/MzKrkZ/rDpXB7OWtD?Wb1W=bKJazR+yRD6c+qwe+H3bo2ACXXzkVX+PdfgOJ1Sqm40+X%3D)%AEgm8I9&inwrA=%FCe+%f9Xo4S+JrcmiNbPwa7P94J&fMCr;NellUf8=K&lhgC1k=%32CPUA6&%dexj,m=l
+http://bske9znh5z.mq/rF739Qhneaet/NTfzZn
+http://B7z94v/
+FTP://p9s.hh313n.6k3.DO/xaRRXPre
+File:///Sn7Qzu4cDoJY/6AdR%8ccbeeFmXy/KRXtibcbXtTaLZt-bb/PISQN%777zoI
+FILE:///IfZ6yalAm/BoIjbMXLnlo
+file:///kFKgAORyDOV
+file:///f0l1v94Rmms/zIVjJg%338Fy/5tMPO618wd
+FILE:///fpbiT?6/%0B7dUkWR5r%AErqLW/v2n%bet%b3wV8Yzi80OJ.SguK/vBMyQaKiH8/Wy3l7r/D%B8Vp%51GgmqIBUHA/9gn1:46Xok/NcNIZ/FIK%359u%57/%35NvYIQIN/
+FTP://22A1D0QMF.cmcve.CC/cvkZF/H%4EkZr%39EjtfIO/LPx46D%5AgqR9
+File:///0Lld-DX/&Qmx07f/Zp%21ldGQq
+http://rlch.COOP/%bcKE55hwH6/CKHB%2Ak/Qzsn2Rn1p3RUc3H
+http://h6d5js.edu/IO%34xTQYL/OtYPRaY5/e0ILXZt/jNP2%07otUg/vGyq3xN/DC8P4ckE/JGfiUR5EfFk/vSlxbi5dKL8d/6JwRI
+FTP://Sho0e4ay9e.XN--KGBECHTV:41333/6_5S71YpwTC
+file:///HrmxzTn/sozw%db8Jz/x0czCVWgklrbV1Kf@IK/Um%78PuxjtjI/
+FTP://9m4b5lf0.Y5dnwnduzx9wha22ayztin-t7hng5b62e07rzsv55325xgdrzwx.gov/pmG%45dhnQZ
+ftp://t2ik0rgw.krjz72-l.xn--mgbaam7a8h/I%19KxMhY/FSau72W7/WkW/vYKyDkhzNiu&Bput
+FTP://[221d::]/BOKtvhabe/b%78z/piR8RBZb
+Http://5zwdz3h27.q9l27mto-5v0i3i1yu8oyl.TN/wk91N/X32rxh/cmM%01iQPnCulto/
+FTP://gWUFGOXE8EW.1g9vse.xn--wgbh1c/ncQo%42ihY/Tyk216/;type=d#J4A9HEH
+FTP://5wudd.ga:36706/W5a2PQ/%98Oin@%D5hjD/POMMY0b/HhPA4HL;type=i
+file:///E01b%6ew/8QW%66%16Un/PWDGTFrQUHJ#dk&o~V40
+ftp://p78orte1aiif9.zk-l-n5drgvx2kj6i9e034ck587-utyikjhal.qE5RJ031K2FAN-35.v71jyg8l/wgwpnw5/1WPLlSc8/3RZzlIEZMlC8/ytaOFdSuPKO%72T
+tri9.Fyhn.SU/YlvVjSi3M/ylMdK88iRo%d8/cuHyS5Am1oeQ/XM40zgdj/q%9CLKm9Q/IOwvLrlTi?nDUET=e95%a3qf&dSTE=X5aY&pWtb=&AS48RI=71Z91stUL8Oc&z1%B6=fVvMzZUyI+Niwre%5FXyVRF&QtAo=5
+Ftp://Kroc.Ls4-tkd7.sg:58219/9tq-FJyL?Qb/e0alokGZ2/MKTHP3Wsw
+pmg4ty.m59480p2f69.fV.COM/X98xZ.E/cTleUeS/9P6zeVQjfd30/eVVvE4/Zyxm1SSqe9u/WP%a5hS
+6P.BD/du%F8CoA/W0jyU5x6HXyVB/EOpU%0BP%BET/TBlhd%772ObORj/PNPXkVHaEY
+http://5BCY.X3.SG/N~63s98IV2/?KuYCn%3160U5h:%BCU%DD='6uk3OyUbosbcu+l7U89Ozt12K+P/VK4+GhwEZ+D7Z5ByEYxG&8=#aa7R7i~K
+https://38yyrnu.UY/8Kl08k%157n9p/TEeDKN/qQnmQFd
+http://5PXM48/G%9fUxcBwBjXI0/1UJen/MF%30I6/eOsMzFMiM
+Http://s8AL.rc94r4iftx7qeg4cbjjv5.za/mYk9UAydyn4q@w/T7K/dd%8aIXPp
+Http://130.165.027.114/o8bwef/X%70neu3uGKY/NU%f8xTKW0;hTKK/V;%edBnJYWG0MI/ZlDMtVPK7?k1N:WnR=%3DNffenC%67+sf(z0U!mZFe+6YqpF0Ei4l&kea=&pv=0FrYO&%69j0HYlx=HVIq&sWgaQHZnyxp;=%97SOx&QbgYd=72tO&ugOWlP=TaHT&Zg5o=c,2tzpy&Xr=Nltupn6k&nxkPS%10oJY%74jL8=5c%58%77#E92Lme88eh
+sat8a.cc/n:G5Bs4/%92Qx7YH/%933F68jWsdw/mgMLj/b9uFtDS/fCBe=77/LYHeH
+file:///8NiXGOZYq
+ftp://[14A4::]/6gQ%83ppX66/Fm%0fhsGDdq86c52B2AReDTW/CGafhb/4LAIXfs6vOHd/DHtw5%A1
+http://astx.i8o5jdypn1ly.LC
+Ftp://7j.N@Ptavog8.gh/%FDJUUJB/nrC6%4as/AM2BxLCU:fGwm
+file:///LD3OAKQVR
+http://jVVR4GZ.BG/XELY1/P=cusbVv5o
+HTTP://4fx.3kt642w.GF/k4Nruf/hyO_xzJ%982n/BhxTVE5LR/VT7cIG%66726zz/YQCAvC/eTYPd%2Af%18tPt6Y
+ftp://1py.jhl5-h.53.39PN2C.xN.ps/Q6kM9aOm7
+1MRTJ51.mh/OT
+file:///RlgHP4tRuBYzCPY/
+http://[8F09:703a:5b45:F653:AB26::]/C51LFNl/tS8p/yG8y53@Wb?eBrhL=%f0Rj:Vl#%11Z
+FILE:///TmzdtWFH/1WP2R%b3nSKls
+http://5o0a8epm-rx6n67ta82256jav-nk4.lb/HbOqUc/TIVeqJ7Ohp/BjDwRDKJ/JZO
+File:///AvnO.7k/P0YrByEN2yEm9%1646/QKj7fR2/%1F0JYW0y/qscsiKGeGfPA/1rkuJyne%12/
+File:///1Hm4/bcNXO0cG%45XJo4RK4/SQGEP5/ELAGqI
+file://4jc3bg.zs/WfjCr2aeWME/Nv4A4B/invk2d1h
+Vj1.Ngq.LI/FR2%b7RU_z%a1Tf2vy/rysXmZ0/
+Ftp://wkws.yi8srfw.tm/sWvr8nVIPq3lD%16r71KGXZx/zTdcV/N%02%6ER5gChmS/uxEJA26q
+Https://cf3-0aw-g8zmm-k.AO/mYGm9AqQW%E4q?6u=&rX=
+8vv-rhcodmrr42jd6zmrnl7xa.F1igvm2.RO?rQOIRt=Q&Z8=1WyCZjZv83+lpB%7a
+Http://009.130.112.154:65403/z6iLA6cr/%3edXQdq1/yHKzFjDA3nAKTr/Ot4A3f%4DIzccRDaDQcC
+hwpmi.upmzdzzhsrz.e469.ee/SXdNeY7NHR6/Vr6%FDr
+http://[C7E7:57e7:b08c:9FCD:4B77:4de1:229.020.164.172]/LnIzKLn/StXMmto
+Http://2-6SB2KV8V8MV290SIC08D9J7-IRM9FTPC8ZZ.hwo9el74qqv1.zm/tr9K2BSFkbU-A8wJR/CGEL_82/cnMuBB%a3j34
+file:///fUtCm%b6qNK/lltu?NvBAhM/sJ8pOm:/jJ18OTM6U%f5v%3f/
+http://76OXC.pn.GA:15181/OPErhH1cHtl1ba/eIPkR6%1EG/8fVd02k/Ky%b0D5izq4k
+ftp://154.108.127.0/vGpMboeazp05/usfmVeitt0pf3o/Ue4OMVT/sJ9BAYSLje
+ftp://ivbv0.zCR-0J.lku/6m26/7tElM/%b2%0BI.Ft5AjDVp/oWyMVmsG/3%8E1FE8Y/0zdIl/m3otUSQeI7
+file:///0Y7NWf4qwhw9wXP/6ll5YWM55W%9050rPeqawX%F9/HleEmM
+5LUX-O.q-33d.tn/smzXQJn3H/81mg%4de_/jb%97hT
+http://84W32/CCKpkt/c0bqCnoQ5Y
+ftp://nyqaz.MT/0OfOsU7S1H9BM/OjhdD/izbR4txUY
+8wo2j2c1z9s.ef2ki0mlvvnjm5vfyu.t5a-yb41uykgo5kn1qxzffhz667dty8mytg6ir7os9hoxwm2.mw/%39FEVmD/%a4qRT5W5qW.yR/8XB9NHyB/
+http://rbf6ezzlhpe.hk/%0DK8/IXXJAsC?mV8vvDI8K=6t9%6EG1Dt+M7N+D5n@Vd79n%d8E+gj+ofnZ%16loobN+f3-S+e,IH&lnh=
+wu3w.0J5.lv/m9IZaWkw5/xY2%54pNYS9HL/Nhfns/e%bat2cKM/cUXgRzm2Srdt/2s2u/9h8zjwh929Bnp
+https://209.73.217.17/dJvsqDH/RH6Ok_eSc8wO5/BOJws6/9f0DvXJ4/?%ea'Fx=P&6h3zz3eGCtK=4MF76p7Em
+jfajtdt5k6gu11la2jbih.MA/zcaTNUL/3q%31eLT%bc3S/L6v2rt/WtbA0%45~TIvPD
+ftp://Defi-z.gr:16993/=7IIaMpVy3OLs/QtQD7qF5Vr/=RVbNDH8/y3oUHmX.v/Td%dcbiGlArA%720
+ftp://[544f:e60a::8772:D633:DA1F:081.021.019.189]:62615/%CB6Wy1K/X%0EcoPQ/IgnCMLPynfx/fdFHb
+ftp://1INQM6.4y.RO/
+Http://T778hd416.g9r96v.bs:64804/GbWp%47K/zgTKs/cBHzmYZ=AI23VY
+HTTPS://6hp3j2y2tuakzv1rnq9vnvn1w0j6roo3if:58975/vH8BLTu3hzkk
+ftp://Ye1dfbl0eae8lqiiqaojj.JO/8EjAq0TzD:/Bz3Pm2qyWo/ZX58A2/yjn%9F3xJZjsVhw
+66.242.9.138/CYHK1bGpZ/5yyVD%cbC
+nHZMBEJWO.ST/ABXauli3wuJ/WUxhKaZJg
+ftp://[8463:c210::b5d1]:34094/8%AC7Fc/Qh6%62yFExJbdaB/0cAZ3iSKlk8sU;TYPE=D
+http://vmlyl0efotpfd-tew59kcpsi2u7qd/UbXy1Cc/L%0cwnzmdjz/?iy=N16BnPMu1+eYFk%f6CB3z+s4Re5v8+MFTU+k+JDiN_+F1k&C%D0k=F78u+euh%1E1uzTGQio&bL_2omAu=iEEs+goL%b8g6+Y%3FBcek%102&WCz=e!Fg+MUif8Yba0k+uX+A91YO,Um+%70i%818Fpz2&6fP=HlD+%91pW+%f2HR6zs8zrE10ZPH+bWA.BB6k+Df3w:X85xDnDjSiPY+AyDpuSl4VEVTJzA3g&OtUR6=
+http://bCNNCLT.gxa2sbn/lAFakp
+D19f.oD5.bb/xUG6W8VxTcjMG/jYMuWlVMygf/UtIwE13c/%a9wzpO%AFxQ9
+q8HY2P.r5T.AU/nc0Iq%28QAF/#yOD3%b3UA%d79e%1EmJp3
+dPY3X09.AC/STpa%97U%b53yKP4Te/%71KZZvIC#nA1W2z
+ftp://3gb.xgjm/wF%ado0cM/u%0DmCW8L/d9Ss%61dKQ
+6m.56xkyt.32O.com/ToEAr%BEdi/xBpPU2NqC/74sgdq%BD9/WSrx5/5ldupD%47J/9boeZj
+ftp://s0y6r7hg7.XN--KGBECHTV/xQizIlOK9/uxho7%bd/RvxbFGQ4o/O%42UeWF?/GAZ5E8b2/eRaq/l:-1ASwSpw/2FkowF%12Ss/vtCq9dysEc%1ee/
+[d18d:1707::]/NGZMInsLF8/kgC3y/F66qc1qt6OWfeS/DyngWA
+file:///%55A4VpGsup
+file:///WNEw%bfTWDLF/s%A9oZoWUo
+Ftp://2tdk.Ube6velthhhx8o.GM/bUH4XycSEKkTE
+ftp://7kxk4ujzz.kp:32621/hbop0%25sK/rw7RBE0lTN/tX5BLF
+FILE:///IQExpA4kDvUfTkH6Bg/MeVJ4aIUbXCJf
+file:///SIE0AkJFq/ZPJLyYK/6hA3x1InlGm1
+http://047.014.184.200/Z_QdOwjzfBue4Nt/aEn/xuEQD/cXlnoxHIK%7d8h/1%eegEk7E0/8Ejku@r1Z/UZ4gG/%484zOJsP%1b/Lc1okbWRzN5UJ
+Http://w9ys35.wb55p6l.hxl.rs/Y97%58Lp8JjLZw/5L
+FILE://155.24.106.255/3VEZIT7
+d1y8zvhwq40bi3tom.hPCZ.gJ-286X.TG/ayWKrgAvF6tn/L4SgquZT6C/1DmNe/CI69rJ/%f6QrzZGkSQ
+lda5l5wc.XN--HGBK6AJ7F53BBA/pr80SSZ/eNM1%D50lp/Rc%8EimOET
+l13t2t.sk/O%2BmRkw/@0AgGL@NX/wgt&aggDcp#0IYe'C
+FILE://a6ys9a4.xj.BY/%99BGXp/F=yJtxc71/gvXuHuB9k
+212.072.006.032/6kV8ce%2e/%e7lzm-HB%4artP/zg6tWMW7RIG?U7=HAXw$D3sM%7DyDJ&Gt=
+http://[ea5::]/eIdv5xl/5qhxlOvzw%018f/N3RQQKCz/WzUnsSg8KA3/7ohHZCp
+file:///g_T81EaNw2nJB/1yUUT
+http://2XXY0MZ.fwa.791ck-2gx.bd/uO6FW?ZS5jE:=m:
+https://[8368:F154::f99f]/Y3h8FgzTYYpzn/zHFhQECC/CGtX/8v_~jn3Kn
diff --git a/lucene/backwards/src/test/org/apache/lucene/collation/TestCollationKeyAnalyzer.java b/lucene/backwards/src/test/org/apache/lucene/collation/TestCollationKeyAnalyzer.java
new file mode 100644
index 0000000..2545cab
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/collation/TestCollationKeyAnalyzer.java
@@ -0,0 +1,92 @@
+package org.apache.lucene.collation;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.CollationTestBase;
+
+import java.text.Collator;
+import java.util.Locale;
+
+
+public class TestCollationKeyAnalyzer extends CollationTestBase {
+  // the sort order of Ø versus U depends on the version of the rules being used
+  // for the inherited root locale: Ø's order isn't specified in Locale.US since
+  // it's not used in English.
+  private boolean oStrokeFirst = Collator.getInstance(new Locale("")).compare("Ø", "U") < 0;
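+  // (oStrokeFirst is used in testCollationKeySort below to pick the expected ordering.)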
+  
+  // Neither Java 1.4.2 nor 1.5.0 has Farsi Locale collation available in
+  // RuleBasedCollator.  However, the Arabic Locale seems to order the Farsi
+  // characters properly.
+  private Collator collator = Collator.getInstance(new Locale("ar"));
+  private Analyzer analyzer = new CollationKeyAnalyzer(collator);
+
+  private String firstRangeBeginning = encodeCollationKey
+    (collator.getCollationKey(firstRangeBeginningOriginal).toByteArray());
+  private String firstRangeEnd = encodeCollationKey
+    (collator.getCollationKey(firstRangeEndOriginal).toByteArray());
+  private String secondRangeBeginning = encodeCollationKey
+    (collator.getCollationKey(secondRangeBeginningOriginal).toByteArray());
+  private String secondRangeEnd = encodeCollationKey
+    (collator.getCollationKey(secondRangeEndOriginal).toByteArray());
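+  // The four strings above are raw collation key bytes encoded into index-safe strings
+  // (via the inherited CollationTestBase.encodeCollationKey); the Farsi range tests below
+  // pass them as query bounds.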
+  
+  public void testFarsiRangeFilterCollating() throws Exception {
+    testFarsiRangeFilterCollating
+      (analyzer, firstRangeBeginning, firstRangeEnd, 
+       secondRangeBeginning, secondRangeEnd);
+  }
+ 
+  public void testFarsiRangeQueryCollating() throws Exception {
+    testFarsiRangeQueryCollating
+      (analyzer, firstRangeBeginning, firstRangeEnd, 
+       secondRangeBeginning, secondRangeEnd);
+  }
+
+  public void testFarsiTermRangeQuery() throws Exception {
+    testFarsiTermRangeQuery
+      (analyzer, firstRangeBeginning, firstRangeEnd, 
+       secondRangeBeginning, secondRangeEnd);
+  }
+  
+  public void testCollationKeySort() throws Exception {
+    Analyzer usAnalyzer 
+      = new CollationKeyAnalyzer(Collator.getInstance(Locale.US));
+    Analyzer franceAnalyzer 
+      = new CollationKeyAnalyzer(Collator.getInstance(Locale.FRANCE));
+    Analyzer swedenAnalyzer 
+      = new CollationKeyAnalyzer(Collator.getInstance(new Locale("sv", "se")));
+    Analyzer denmarkAnalyzer 
+      = new CollationKeyAnalyzer(Collator.getInstance(new Locale("da", "dk")));
+    
+    // The ICU Collator and Sun java.text.Collator implementations differ in their
+    // orderings - "BFJDH" is the ordering for java.text.Collator for Locale.US.
+    testCollationKeySort
+    (usAnalyzer, franceAnalyzer, swedenAnalyzer, denmarkAnalyzer, 
+     oStrokeFirst ? "BFJHD" : "BFJDH", "EACGI", "BJDFH", "BJDHF");
+  }
+  
+  public void testThreadSafe() throws Exception {
+    int iters = 20 * RANDOM_MULTIPLIER;
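+    // Each iteration builds a fresh PRIMARY-strength German collator and checks, via
+    // CollationTestBase.assertThreadSafe, that the analyzer behaves consistently when
+    // used from multiple threads.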
+    for (int i = 0; i < iters; i++) {
+      Collator collator = Collator.getInstance(Locale.GERMAN);
+      collator.setStrength(Collator.PRIMARY);
+      assertThreadSafe(new CollationKeyAnalyzer(collator));
+    }
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/collation/TestCollationKeyFilter.java b/lucene/backwards/src/test/org/apache/lucene/collation/TestCollationKeyFilter.java
new file mode 100644
index 0000000..4b03bf3
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/collation/TestCollationKeyFilter.java
@@ -0,0 +1,101 @@
+package org.apache.lucene.collation;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+import org.apache.lucene.analysis.CollationTestBase;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.KeywordTokenizer;
+
+import java.text.Collator;
+import java.util.Locale;
+import java.io.Reader;
+
+
+public class TestCollationKeyFilter extends CollationTestBase {
+  // the sort order of Ø versus U depends on the version of the rules being used
+  // for the inherited root locale: Ø's order isn't specified in Locale.US since
+  // it's not used in English.
+  boolean oStrokeFirst = Collator.getInstance(new Locale("")).compare("Ø", "U") < 0;
+  
+  // Neither Java 1.4.2 nor 1.5.0 has Farsi Locale collation available in
+  // RuleBasedCollator.  However, the Arabic Locale seems to order the Farsi
+  // characters properly.
+  private Collator collator = Collator.getInstance(new Locale("ar"));
+  private Analyzer analyzer = new TestAnalyzer(collator);
+
+  private String firstRangeBeginning = encodeCollationKey
+    (collator.getCollationKey(firstRangeBeginningOriginal).toByteArray());
+  private String firstRangeEnd = encodeCollationKey
+    (collator.getCollationKey(firstRangeEndOriginal).toByteArray());
+  private String secondRangeBeginning = encodeCollationKey
+    (collator.getCollationKey(secondRangeBeginningOriginal).toByteArray());
+  private String secondRangeEnd = encodeCollationKey
+    (collator.getCollationKey(secondRangeEndOriginal).toByteArray());
+
+  
+  public final class TestAnalyzer extends Analyzer {
+    private Collator _collator;
+
+    TestAnalyzer(Collator collator) {
+      _collator = collator;
+    }
+
+    @Override
+    public TokenStream tokenStream(String fieldName, Reader reader) {
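+      // KeywordTokenizer emits the whole input as a single token, which
+      // CollationKeyFilter then replaces with its encoded collation key.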
+      TokenStream result = new KeywordTokenizer(reader);
+      result = new CollationKeyFilter(result, _collator);
+      return result;
+    }
+  }
+
+  public void testFarsiRangeFilterCollating() throws Exception {
+    testFarsiRangeFilterCollating
+      (analyzer, firstRangeBeginning, firstRangeEnd, 
+       secondRangeBeginning, secondRangeEnd);
+  }
+ 
+  public void testFarsiRangeQueryCollating() throws Exception {
+    testFarsiRangeQueryCollating
+      (analyzer, firstRangeBeginning, firstRangeEnd, 
+       secondRangeBeginning, secondRangeEnd);
+  }
+
+  public void testFarsiTermRangeQuery() throws Exception {
+    testFarsiTermRangeQuery
+      (analyzer, firstRangeBeginning, firstRangeEnd, 
+       secondRangeBeginning, secondRangeEnd);
+  }
+  
+  public void testCollationKeySort() throws Exception {
+    Analyzer usAnalyzer = new TestAnalyzer(Collator.getInstance(Locale.US));
+    Analyzer franceAnalyzer 
+      = new TestAnalyzer(Collator.getInstance(Locale.FRANCE));
+    Analyzer swedenAnalyzer 
+      = new TestAnalyzer(Collator.getInstance(new Locale("sv", "se")));
+    Analyzer denmarkAnalyzer 
+      = new TestAnalyzer(Collator.getInstance(new Locale("da", "dk")));
+    
+    // The ICU Collator and Sun java.text.Collator implementations differ in their
+    // orderings - "BFJDH" is the ordering for java.text.Collator for Locale.US.
+    testCollationKeySort
+    (usAnalyzer, franceAnalyzer, swedenAnalyzer, denmarkAnalyzer, 
+     oStrokeFirst ? "BFJHD" : "BFJDH", "EACGI", "BJDFH", "BJDHF");
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/document/TestBinaryDocument.java b/lucene/backwards/src/test/org/apache/lucene/document/TestBinaryDocument.java
new file mode 100644
index 0000000..20d6db6
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/document/TestBinaryDocument.java
@@ -0,0 +1,115 @@
+package org.apache.lucene.document;
+
+import org.apache.lucene.util.LuceneTestCase;
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.store.Directory;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Tests binary and compressed field values on a {@link Document}.
+ */
+public class TestBinaryDocument extends LuceneTestCase {
+
+  String binaryValStored = "this text will be stored as a byte array in the index";
+  String binaryValCompressed = "this text will be also stored and compressed as a byte array in the index";
+  
+  public void testBinaryFieldInIndex()
+    throws Exception
+  {
+    Fieldable binaryFldStored = new Field("binaryStored", binaryValStored.getBytes());
+    Fieldable stringFldStored = new Field("stringStored", binaryValStored, Field.Store.YES, Field.Index.NO, Field.TermVector.NO);
+
+    try {
+      // binary fields with store off are not allowed
+      new Field("fail", binaryValStored.getBytes(), Field.Store.NO);
+      fail();
+    }
+    catch (IllegalArgumentException iae) {
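+      // expected: a binary field value must be stored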
+    }
+    
+    Document doc = new Document();
+    
+    doc.add(binaryFldStored);
+    
+    doc.add(stringFldStored);
+
+    /** test for field count */
+    assertEquals(2, doc.fields.size());
+    
+    /** add the doc to a ram index */
+    Directory dir = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random, dir);
+    writer.addDocument(doc);
+    
+    /** open a reader and fetch the document */ 
+    IndexReader reader = writer.getReader();
+    Document docFromReader = reader.document(0);
+    assertTrue(docFromReader != null);
+    
+    /** fetch the binary stored field and compare its content with the original one */
+    String binaryFldStoredTest = new String(docFromReader.getBinaryValue("binaryStored"));
+    assertTrue(binaryFldStoredTest.equals(binaryValStored));
+    
+    /** fetch the string field and compare its content with the original one */
+    String stringFldStoredTest = docFromReader.get("stringStored");
+    assertTrue(stringFldStoredTest.equals(binaryValStored));
+    
+    writer.close();    
+    reader.close();
+    
+    reader = IndexReader.open(dir, false);
+    /** delete the document from index */
+    reader.deleteDocument(0);
+    assertEquals(0, reader.numDocs());
+    
+    reader.close();
+    dir.close();
+  }
+  
+  public void testCompressionTools() throws Exception {
+    Fieldable binaryFldCompressed = new Field("binaryCompressed", CompressionTools.compress(binaryValCompressed.getBytes()));
+    Fieldable stringFldCompressed = new Field("stringCompressed", CompressionTools.compressString(binaryValCompressed));
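+    // Both values are stored as compressed byte arrays; the assertions below decompress
+    // them and compare against the original strings.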
+    
+    Document doc = new Document();
+    
+    doc.add(binaryFldCompressed);
+    doc.add(stringFldCompressed);
+    
+    /** add the doc to a ram index */
+    Directory dir = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random, dir);
+    writer.addDocument(doc);
+    
+    /** open a reader and fetch the document */ 
+    IndexReader reader = writer.getReader();
+    Document docFromReader = reader.document(0);
+    assertTrue(docFromReader != null);
+    
+    /** fetch the binary compressed field and compare its content with the original one */
+    String binaryFldCompressedTest = new String(CompressionTools.decompress(docFromReader.getBinaryValue("binaryCompressed")));
+    assertTrue(binaryFldCompressedTest.equals(binaryValCompressed));
+    assertTrue(CompressionTools.decompressString(docFromReader.getBinaryValue("stringCompressed")).equals(binaryValCompressed));
+
+    writer.close();
+    reader.close();
+    dir.close();
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/document/TestDateTools.java b/lucene/backwards/src/test/org/apache/lucene/document/TestDateTools.java
new file mode 100644
index 0000000..be6fb93
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/document/TestDateTools.java
@@ -0,0 +1,199 @@
+package org.apache.lucene.document;
+
+import java.text.ParseException;
+import java.text.SimpleDateFormat;
+import java.util.Calendar;
+import java.util.Date;
+import java.util.GregorianCalendar;
+import java.util.TimeZone;
+import java.util.Locale;
+
+import org.apache.lucene.util.LuceneTestCase;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+public class TestDateTools extends LuceneTestCase {
+
+  public void testStringToDate() throws ParseException {
+    
+    Date d = null;
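+    // Fields missing from the string default to their minimum, e.g. "2004" parses
+    // to 2004-01-01 00:00:00:000.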
+    d = DateTools.stringToDate("2004");
+    assertEquals("2004-01-01 00:00:00:000", isoFormat(d));
+    d = DateTools.stringToDate("20040705");
+    assertEquals("2004-07-05 00:00:00:000", isoFormat(d));
+    d = DateTools.stringToDate("200407050910");
+    assertEquals("2004-07-05 09:10:00:000", isoFormat(d));
+    d = DateTools.stringToDate("20040705091055990");
+    assertEquals("2004-07-05 09:10:55:990", isoFormat(d));
+
+    try {
+      d = DateTools.stringToDate("97");    // no date
+      fail();
+    } catch(ParseException e) { /* expected exception */ }
+    try {
+      d = DateTools.stringToDate("200401011235009999");    // no date
+      fail();
+    } catch(ParseException e) { /* expected exception */ }
+    try {
+      d = DateTools.stringToDate("aaaa");    // no date
+      fail();
+    } catch(ParseException e) { /* expected exception */ }
+
+  }
+  
+  public void testStringtoTime() throws ParseException {
+    long time = DateTools.stringToTime("197001010000");
+    Calendar cal = new GregorianCalendar();
+    cal.clear();
+    cal.set(1970, 0, 1,    // year=1970, month=january, day=1
+        0, 0, 0);          // hour, minute, second
+    cal.set(Calendar.MILLISECOND, 0);
+    cal.setTimeZone(TimeZone.getTimeZone("GMT"));
+    assertEquals(cal.getTime().getTime(), time);
+    cal.set(1980, 1, 2,    // year=1980, month=february, day=2
+        11, 5, 0);          // hour, minute, second
+    cal.set(Calendar.MILLISECOND, 0);
+    time = DateTools.stringToTime("198002021105");
+    assertEquals(cal.getTime().getTime(), time);
+  }
+  
+  public void testDateAndTimetoString() throws ParseException {
+    Calendar cal = new GregorianCalendar();
+    cal.clear();
+    cal.setTimeZone(TimeZone.getTimeZone("GMT"));
+    cal.set(2004, 1, 3,   // year=2004, month=february(!), day=3
+        22, 8, 56);       // hour, minute, second
+    cal.set(Calendar.MILLISECOND, 333);
+    
+    String dateString;
+    dateString = DateTools.dateToString(cal.getTime(), DateTools.Resolution.YEAR);
+    assertEquals("2004", dateString);
+    assertEquals("2004-01-01 00:00:00:000", isoFormat(DateTools.stringToDate(dateString)));
+    
+    dateString = DateTools.dateToString(cal.getTime(), DateTools.Resolution.MONTH);
+    assertEquals("200402", dateString);
+    assertEquals("2004-02-01 00:00:00:000", isoFormat(DateTools.stringToDate(dateString)));
+
+    dateString = DateTools.dateToString(cal.getTime(), DateTools.Resolution.DAY);
+    assertEquals("20040203", dateString);
+    assertEquals("2004-02-03 00:00:00:000", isoFormat(DateTools.stringToDate(dateString)));
+    
+    dateString = DateTools.dateToString(cal.getTime(), DateTools.Resolution.HOUR);
+    assertEquals("2004020322", dateString);
+    assertEquals("2004-02-03 22:00:00:000", isoFormat(DateTools.stringToDate(dateString)));
+    
+    dateString = DateTools.dateToString(cal.getTime(), DateTools.Resolution.MINUTE);
+    assertEquals("200402032208", dateString);
+    assertEquals("2004-02-03 22:08:00:000", isoFormat(DateTools.stringToDate(dateString)));
+    
+    dateString = DateTools.dateToString(cal.getTime(), DateTools.Resolution.SECOND);
+    assertEquals("20040203220856", dateString);
+    assertEquals("2004-02-03 22:08:56:000", isoFormat(DateTools.stringToDate(dateString)));
+    
+    dateString = DateTools.dateToString(cal.getTime(), DateTools.Resolution.MILLISECOND);
+    assertEquals("20040203220856333", dateString);
+    assertEquals("2004-02-03 22:08:56:333", isoFormat(DateTools.stringToDate(dateString)));
+
+    // date before 1970:
+    cal.set(1961, 2, 5,   // year=1961, month=march(!), day=5
+        23, 9, 51);       // hour, minute, second
+    cal.set(Calendar.MILLISECOND, 444);
+    dateString = DateTools.dateToString(cal.getTime(), DateTools.Resolution.MILLISECOND);
+    assertEquals("19610305230951444", dateString);
+    assertEquals("1961-03-05 23:09:51:444", isoFormat(DateTools.stringToDate(dateString)));
+
+    dateString = DateTools.dateToString(cal.getTime(), DateTools.Resolution.HOUR);
+    assertEquals("1961030523", dateString);
+    assertEquals("1961-03-05 23:00:00:000", isoFormat(DateTools.stringToDate(dateString)));
+
+    // timeToString:
+    cal.set(1970, 0, 1, // year=1970, month=january, day=1
+        0, 0, 0); // hour, minute, second
+    cal.set(Calendar.MILLISECOND, 0);
+    dateString = DateTools.timeToString(cal.getTime().getTime(),
+        DateTools.Resolution.MILLISECOND);
+    assertEquals("19700101000000000", dateString);
+        
+    cal.set(1970, 0, 1, // year=1970, month=january, day=1
+        1, 2, 3); // hour, minute, second
+    cal.set(Calendar.MILLISECOND, 0);
+    dateString = DateTools.timeToString(cal.getTime().getTime(),
+        DateTools.Resolution.MILLISECOND);
+    assertEquals("19700101010203000", dateString);
+  }
+  
+  public void testRound() {
+    Calendar cal = new GregorianCalendar();
+    cal.clear();
+    cal.setTimeZone(TimeZone.getTimeZone("GMT"));
+    cal.set(2004, 1, 3,   // year=2004, month=february(!), day=3
+        22, 8, 56);       // hour, minute, second
+    cal.set(Calendar.MILLISECOND, 333);
+    Date date = cal.getTime();
+    assertEquals("2004-02-03 22:08:56:333", isoFormat(date));
+
+    Date dateYear = DateTools.round(date, DateTools.Resolution.YEAR);
+    assertEquals("2004-01-01 00:00:00:000", isoFormat(dateYear));
+
+    Date dateMonth = DateTools.round(date, DateTools.Resolution.MONTH);
+    assertEquals("2004-02-01 00:00:00:000", isoFormat(dateMonth));
+
+    Date dateDay = DateTools.round(date, DateTools.Resolution.DAY);
+    assertEquals("2004-02-03 00:00:00:000", isoFormat(dateDay));
+
+    Date dateHour = DateTools.round(date, DateTools.Resolution.HOUR);
+    assertEquals("2004-02-03 22:00:00:000", isoFormat(dateHour));
+
+    Date dateMinute = DateTools.round(date, DateTools.Resolution.MINUTE);
+    assertEquals("2004-02-03 22:08:00:000", isoFormat(dateMinute));
+
+    Date dateSecond = DateTools.round(date, DateTools.Resolution.SECOND);
+    assertEquals("2004-02-03 22:08:56:000", isoFormat(dateSecond));
+
+    Date dateMillisecond = DateTools.round(date, DateTools.Resolution.MILLISECOND);
+    assertEquals("2004-02-03 22:08:56:333", isoFormat(dateMillisecond));
+
+    // long parameter:
+    long dateYearLong = DateTools.round(date.getTime(), DateTools.Resolution.YEAR);
+    assertEquals("2004-01-01 00:00:00:000", isoFormat(new Date(dateYearLong)));
+
+    long dateMillisecondLong = DateTools.round(date.getTime(), DateTools.Resolution.MILLISECOND);
+    assertEquals("2004-02-03 22:08:56:333", isoFormat(new Date(dateMillisecondLong)));
+  }
+
+  private String isoFormat(Date date) {
+    SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss:SSS", Locale.US);
+    sdf.setTimeZone(TimeZone.getTimeZone("GMT"));
+    return sdf.format(date);
+  }
+
+  public void testDateToolsUTC() throws Exception {
+    // Sun, 30 Oct 2005 00:00:00 +0000 -- the last second of 2005's DST in Europe/London
+    long time = 1130630400;
+    try {
+        TimeZone.setDefault(TimeZone.getTimeZone(/* "GMT" */ "Europe/London"));
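+        // DateTools always formats and parses in GMT, so switching the JVM default
+        // time zone must not change the round-tripped epoch values below.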
+        String d1 = DateTools.dateToString(new Date(time*1000), DateTools.Resolution.MINUTE);
+        String d2 = DateTools.dateToString(new Date((time+3600)*1000), DateTools.Resolution.MINUTE);
+        assertFalse("different times", d1.equals(d2));
+        assertEquals("midnight", DateTools.stringToTime(d1), time*1000);
+        assertEquals("later", DateTools.stringToTime(d2), (time+3600)*1000);
+    } finally {
+        TimeZone.setDefault(null);
+    }
+  }
+
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/document/TestDocument.java b/lucene/backwards/src/test/org/apache/lucene/document/TestDocument.java
new file mode 100644
index 0000000..a89415b
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/document/TestDocument.java
@@ -0,0 +1,282 @@
+package org.apache.lucene.document;
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.Searcher;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Tests {@link Document} class.
+ */
+public class TestDocument extends LuceneTestCase {
+  
+  String binaryVal = "this text will be stored as a byte array in the index";
+  String binaryVal2 = "this text will be also stored as a byte array in the index";
+  
+  public void testBinaryField() throws Exception {
+    Document doc = new Document();
+    Fieldable stringFld = new Field("string", binaryVal, Field.Store.YES,
+        Field.Index.NO);
+    Fieldable binaryFld = new Field("binary", binaryVal.getBytes());
+    Fieldable binaryFld2 = new Field("binary", binaryVal2.getBytes());
+    
+    doc.add(stringFld);
+    doc.add(binaryFld);
+    
+    assertEquals(2, doc.fields.size());
+    
+    assertTrue(binaryFld.isBinary());
+    assertTrue(binaryFld.isStored());
+    assertFalse(binaryFld.isIndexed());
+    assertFalse(binaryFld.isTokenized());
+    
+    String binaryTest = new String(doc.getBinaryValue("binary"));
+    assertTrue(binaryTest.equals(binaryVal));
+    
+    String stringTest = doc.get("string");
+    assertTrue(binaryTest.equals(stringTest));
+    
+    doc.add(binaryFld2);
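+    // A second value added under the same field name accumulates rather than replacing
+    // the first; getBinaryValues("binary") returns both below.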
+    
+    assertEquals(3, doc.fields.size());
+    
+    byte[][] binaryTests = doc.getBinaryValues("binary");
+    
+    assertEquals(2, binaryTests.length);
+    
+    binaryTest = new String(binaryTests[0]);
+    String binaryTest2 = new String(binaryTests[1]);
+    
+    assertFalse(binaryTest.equals(binaryTest2));
+    
+    assertTrue(binaryTest.equals(binaryVal));
+    assertTrue(binaryTest2.equals(binaryVal2));
+    
+    doc.removeField("string");
+    assertEquals(2, doc.fields.size());
+    
+    doc.removeFields("binary");
+    assertEquals(0, doc.fields.size());
+  }
+  
+  /**
+   * Tests {@link Document#removeField(String)} method for a brand new Document
+   * that has not been indexed yet.
+   * 
+   * @throws Exception on error
+   */
+  public void testRemoveForNewDocument() throws Exception {
+    Document doc = makeDocumentWithFields();
+    assertEquals(8, doc.fields.size());
+    doc.removeFields("keyword");
+    assertEquals(6, doc.fields.size());
+    doc.removeFields("doesnotexists"); // removing non-existing fields is
+                                       // siltenlty ignored
+    doc.removeFields("keyword"); // removing a field more than once
+    assertEquals(6, doc.fields.size());
+    doc.removeField("text");
+    assertEquals(5, doc.fields.size());
+    doc.removeField("text");
+    assertEquals(4, doc.fields.size());
+    doc.removeField("text");
+    assertEquals(4, doc.fields.size());
+    doc.removeField("doesnotexists"); // removing non-existing fields is
+                                      // siltenlty ignored
+    assertEquals(4, doc.fields.size());
+    doc.removeFields("unindexed");
+    assertEquals(2, doc.fields.size());
+    doc.removeFields("unstored");
+    assertEquals(0, doc.fields.size());
+    doc.removeFields("doesnotexists"); // removing non-existing fields is
+                                       // siltenlty ignored
+    assertEquals(0, doc.fields.size());
+  }
+  
+  public void testConstructorExceptions() {
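+    // A Field must be stored and/or indexed, and term vectors are only allowed
+    // when the field is indexed; the invalid combinations below must throw.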
+    new Field("name", "value", Field.Store.YES, Field.Index.NO); // okay
+    new Field("name", "value", Field.Store.NO, Field.Index.NOT_ANALYZED); // okay
+    try {
+      new Field("name", "value", Field.Store.NO, Field.Index.NO);
+      fail();
+    } catch (IllegalArgumentException e) {
+      // expected exception
+    }
+    new Field("name", "value", Field.Store.YES, Field.Index.NO,
+        Field.TermVector.NO); // okay
+    try {
+      new Field("name", "value", Field.Store.YES, Field.Index.NO,
+          Field.TermVector.YES);
+      fail();
+    } catch (IllegalArgumentException e) {
+      // expected exception
+    }
+  }
+  
+  /**
+   * Tests {@link Document#getValues(String)} method for a brand new Document
+   * that has not been indexed yet.
+   * 
+   * @throws Exception on error
+   */
+  public void testGetValuesForNewDocument() throws Exception {
+    doAssert(makeDocumentWithFields(), false);
+  }
+  
+  /**
+   * Tests {@link Document#getValues(String)} method for a Document retrieved
+   * from an index.
+   * 
+   * @throws Exception on error
+   */
+  public void testGetValuesForIndexedDocument() throws Exception {
+    Directory dir = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random, dir);
+    writer.addDocument(makeDocumentWithFields());
+    IndexReader reader = writer.getReader();
+    
+    IndexSearcher searcher = newSearcher(reader);
+    
+    // search for something that does exist
+    Query query = new TermQuery(new Term("keyword", "test1"));
+    
+    // ensure that queries return expected results without DateFilter first
+    ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals(1, hits.length);
+    
+    doAssert(searcher.doc(hits[0].doc), true);
+    writer.close();
+    searcher.close();
+    reader.close();
+    dir.close();
+  }
+  
+  private Document makeDocumentWithFields() {
+    Document doc = new Document();
+    doc.add(new Field("keyword", "test1", Field.Store.YES,
+        Field.Index.NOT_ANALYZED));
+    doc.add(new Field("keyword", "test2", Field.Store.YES,
+        Field.Index.NOT_ANALYZED));
+    doc.add(new Field("text", "test1", Field.Store.YES, Field.Index.ANALYZED));
+    doc.add(new Field("text", "test2", Field.Store.YES, Field.Index.ANALYZED));
+    doc.add(new Field("unindexed", "test1", Field.Store.YES, Field.Index.NO));
+    doc.add(new Field("unindexed", "test2", Field.Store.YES, Field.Index.NO));
+    doc
+        .add(new Field("unstored", "test1", Field.Store.NO,
+            Field.Index.ANALYZED));
+    doc
+        .add(new Field("unstored", "test2", Field.Store.NO,
+            Field.Index.ANALYZED));
+    return doc;
+  }
+  
+  private void doAssert(Document doc, boolean fromIndex) {
+    String[] keywordFieldValues = doc.getValues("keyword");
+    String[] textFieldValues = doc.getValues("text");
+    String[] unindexedFieldValues = doc.getValues("unindexed");
+    String[] unstoredFieldValues = doc.getValues("unstored");
+    
+    assertTrue(keywordFieldValues.length == 2);
+    assertTrue(textFieldValues.length == 2);
+    assertTrue(unindexedFieldValues.length == 2);
+    // this test cannot work for documents retrieved from the index
+    // since unstored fields will obviously not be returned
+    if (!fromIndex) {
+      assertTrue(unstoredFieldValues.length == 2);
+    }
+    
+    assertTrue(keywordFieldValues[0].equals("test1"));
+    assertTrue(keywordFieldValues[1].equals("test2"));
+    assertTrue(textFieldValues[0].equals("test1"));
+    assertTrue(textFieldValues[1].equals("test2"));
+    assertTrue(unindexedFieldValues[0].equals("test1"));
+    assertTrue(unindexedFieldValues[1].equals("test2"));
+    // this test cannot work for documents retrieved from the index
+    // since unstored fields will obviously not be returned
+    if (!fromIndex) {
+      assertTrue(unstoredFieldValues[0].equals("test1"));
+      assertTrue(unstoredFieldValues[1].equals("test2"));
+    }
+  }
+  
+  public void testFieldSetValue() throws Exception {
+    
+    Field field = new Field("id", "id1", Field.Store.YES,
+        Field.Index.NOT_ANALYZED);
+    Document doc = new Document();
+    doc.add(field);
+    doc.add(new Field("keyword", "test", Field.Store.YES,
+        Field.Index.NOT_ANALYZED));
+    
+    Directory dir = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random, dir);
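+    // The same Document/Field instances are reused: setValue() changes the value that
+    // the next addDocument() call indexes, producing three docs with ids id1..id3.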
+    writer.addDocument(doc);
+    field.setValue("id2");
+    writer.addDocument(doc);
+    field.setValue("id3");
+    writer.addDocument(doc);
+    
+    IndexReader reader = writer.getReader();
+    IndexSearcher searcher = newSearcher(reader);
+    
+    Query query = new TermQuery(new Term("keyword", "test"));
+    
+    // ensure that queries return expected results without DateFilter first
+    ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals(3, hits.length);
+    int result = 0;
+    for (int i = 0; i < 3; i++) {
+      Document doc2 = searcher.doc(hits[i].doc);
+      Field f = doc2.getField("id");
+      if (f.stringValue().equals("id1")) result |= 1;
+      else if (f.stringValue().equals("id2")) result |= 2;
+      else if (f.stringValue().equals("id3")) result |= 4;
+      else fail("unexpected id field");
+    }
+    writer.close();
+    searcher.close();
+    reader.close();
+    dir.close();
+    assertEquals("did not see all IDs", 7, result);
+  }
+  
+  public void testFieldSetValueChangeBinary() {
+    Field field1 = new Field("field1", new byte[0]);
+    Field field2 = new Field("field2", "", Field.Store.YES,
+        Field.Index.ANALYZED);
+    try {
+      field1.setValue("abc");
+      fail("did not hit expected exception");
+    } catch (IllegalArgumentException iae) {
+      // expected
+    }
+    try {
+      field2.setValue(new byte[0]);
+      fail("did not hit expected exception");
+    } catch (IllegalArgumentException iae) {
+      // expected
+    }
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/document/TestNumberTools.java b/lucene/backwards/src/test/org/apache/lucene/document/TestNumberTools.java
new file mode 100644
index 0000000..12734d9
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/document/TestNumberTools.java
@@ -0,0 +1,82 @@
+package org.apache.lucene.document;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.util.LuceneTestCase;
+
+public class TestNumberTools extends LuceneTestCase {
+    public void testNearZero() {
+        for (int i = -100; i <= 100; i++) {
+            for (int j = -100; j <= 100; j++) {
+                subtestTwoLongs(i, j);
+            }
+        }
+    }
+
+    public void testMax() {
+        // make sure the constants convert to their equivalents
+        assertEquals(Long.MAX_VALUE, NumberTools
+                .stringToLong(NumberTools.MAX_STRING_VALUE));
+        assertEquals(NumberTools.MAX_STRING_VALUE, NumberTools
+                .longToString(Long.MAX_VALUE));
+
+        // test near MAX, too
+        for (long l = Long.MAX_VALUE; l > Long.MAX_VALUE - 10000; l--) {
+            subtestTwoLongs(l, l - 1);
+        }
+    }
+
+    public void testMin() {
+        // make sure the constants convert to their equivalents
+        assertEquals(Long.MIN_VALUE, NumberTools
+                .stringToLong(NumberTools.MIN_STRING_VALUE));
+        assertEquals(NumberTools.MIN_STRING_VALUE, NumberTools
+                .longToString(Long.MIN_VALUE));
+
+        // test near MIN, too
+        for (long l = Long.MIN_VALUE; l < Long.MIN_VALUE + 10000; l++) {
+            subtestTwoLongs(l, l + 1);
+        }
+    }
+
+    private static void subtestTwoLongs(long i, long j) {
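+        // NumberTools encodes longs as fixed-length strings whose lexicographic order
+        // matches the numeric order, so the comparisons below must agree.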
+        // convert to strings
+        String a = NumberTools.longToString(i);
+        String b = NumberTools.longToString(j);
+
+        // are they the right length?
+        assertEquals(NumberTools.STR_SIZE, a.length());
+        assertEquals(NumberTools.STR_SIZE, b.length());
+
+        // are they the right order?
+        if (i < j) {
+            assertTrue(a.compareTo(b) < 0);
+        } else if (i > j) {
+            assertTrue(a.compareTo(b) > 0);
+        } else {
+            assertEquals(a, b);
+        }
+
+        // can we convert them back to longs?
+        long i2 = NumberTools.stringToLong(a);
+        long j2 = NumberTools.stringToLong(b);
+
+        assertEquals(i, i2);
+        assertEquals(j, j2);
+    }
+}
\ No newline at end of file
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/Test2BPostings.java b/lucene/backwards/src/test/org/apache/lucene/index/Test2BPostings.java
new file mode 100644
index 0000000..3d5f7de
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/Test2BPostings.java
@@ -0,0 +1,107 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.FieldInfo.IndexOptions;
+import org.apache.lucene.store.MockDirectoryWrapper;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util._TestUtil;
+import org.junit.Ignore;
+
+/**
+ * Test indexes ~82M docs with 26 terms each, so you get > Integer.MAX_VALUE terms/docs pairs
+ * @lucene.experimental
+ */
+public class Test2BPostings extends LuceneTestCase {
+
+  @Nightly
+  public void test() throws Exception {
+
+    MockDirectoryWrapper dir = newFSDirectory(_TestUtil.getTempDir("2BPostings"));
+    dir.setThrottling(MockDirectoryWrapper.Throttling.NEVER);
+    dir.setCheckIndexOnClose(false); // don't double-checkindex
+    
+    IndexWriter w = new IndexWriter(dir,
+        new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
+        .setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH)
+        .setRAMBufferSizeMB(256.0)
+        .setMergeScheduler(new ConcurrentMergeScheduler())
+        .setMergePolicy(newLogMergePolicy(false, 10))
+        .setOpenMode(IndexWriterConfig.OpenMode.CREATE));
+
+    MergePolicy mp = w.getConfig().getMergePolicy();
+    if (mp instanceof LogByteSizeMergePolicy) {
+     // 1 petabyte:
+     ((LogByteSizeMergePolicy) mp).setMaxMergeMB(1024*1024*1024);
+    }
+
+    Document doc = new Document();
+    Field field = new Field("field", new MyTokenStream());
+    field.setIndexOptions(IndexOptions.DOCS_ONLY);
+    field.setOmitNorms(true);
+    doc.add(field);
+    
+    final int numDocs = (Integer.MAX_VALUE / 26) + 1;
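+    // (2^31-1)/26 + 1 = 82,595,525 docs x 26 postings each = 2,147,483,650 postings,
+    // just over Integer.MAX_VALUE.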
+    for (int i = 0; i < numDocs; i++) {
+      w.addDocument(doc);
+      if (VERBOSE && i % 100000 == 0) {
+        System.out.println(i + " of " + numDocs + "...");
+      }
+    }
+    w.optimize();
+    w.close();
+    CheckIndex ci = new CheckIndex(dir);
+    if (VERBOSE) {
+      ci.setInfoStream(System.out);
+    }
+    ci.checkIndex();
+    dir.close();
+  }
+  
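+  // Emits the 26 single-character tokens 'a'..'z' once per document; reset() rewinds
+  // to 'a' and incrementToken() stops after 'z'.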
+  public static final class MyTokenStream extends TokenStream {
+    private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
+    private final char buffer[];
+    int index;
+
+    public MyTokenStream() {
+      termAtt.setLength(1);
+      buffer = termAtt.buffer();
+    }
+    
+    @Override
+    public boolean incrementToken() throws IOException {
+      if (index <= 'z') {
+        buffer[0] = (char) index++;
+        return true;
+      }
+      return false;
+    }
+    
+    @Override
+    public void reset() throws IOException {
+      index = 'a';
+    }
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/Test2BTerms.java b/lucene/backwards/src/test/org/apache/lucene/index/Test2BTerms.java
new file mode 100644
index 0000000..d8665c9
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/Test2BTerms.java
@@ -0,0 +1,220 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.util.*;
+import org.apache.lucene.store.*;
+import org.apache.lucene.search.*;
+import org.apache.lucene.analysis.*;
+import org.apache.lucene.analysis.tokenattributes.*;
+import org.apache.lucene.document.*;
+import org.apache.lucene.index.FieldInfo.IndexOptions;
+import java.io.File;
+import java.io.IOException;
+import java.io.UnsupportedEncodingException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import org.junit.Ignore;
+
+// Best to run this test w/ plenty of RAM (because of the
+// terms index):
+//
+//   ant compile-test
+//
+//   java -server -Xmx8g -d64 -cp .:lib/junit-4.7.jar:./build/classes/test:./build/classes/test-framework:./build/classes/java -Dlucene.version=4.0-dev -Dtests.directory=MMapDirectory -DtempDir=build -ea org.junit.runner.JUnitCore org.apache.lucene.index.Test2BTerms
+//
+
+public class Test2BTerms extends LuceneTestCase {
+
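+  // Each document is a stream of tokensPerDoc random fixed-length (5-char) terms; roughly
+  // every 500k-1M tokens one term is saved so it can be looked up again after indexing.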
+  private final class MyTokenStream extends TokenStream {
+
+    private final int tokensPerDoc;
+    private int tokenCount;
+    private final CharTermAttribute charTerm;
+    private final static int TOKEN_LEN = 5;
+    private final char[] chars;
+    public final List<String> savedTerms = new ArrayList<String>();
+    private int nextSave;
+
+    public MyTokenStream(int tokensPerDoc) {
+      super();
+      this.tokensPerDoc = tokensPerDoc;
+      charTerm = addAttribute(CharTermAttribute.class);
+      chars = charTerm.resizeBuffer(TOKEN_LEN);
+      charTerm.setLength(TOKEN_LEN);
+      nextSave = _TestUtil.nextInt(random, 500000, 1000000);
+    }
+    
+    @Override
+    public boolean incrementToken() {
+      if (tokenCount >= tokensPerDoc) {
+        return false;
+      }
+      _TestUtil.randomFixedLengthUnicodeString(random, chars, 0, TOKEN_LEN);
+      tokenCount++;
+      if (--nextSave == 0) {
+        final String s = new String(chars, 0, TOKEN_LEN);
+        System.out.println("TEST: save term=" + s + " [" + toHexString(s) + "]");
+        savedTerms.add(s);
+        nextSave = _TestUtil.nextInt(random, 500000, 1000000);
+      }
+      return true;
+    }
+
+    @Override
+    public void reset() {
+      tokenCount = 0;
+    }
+  }
+
+  @Ignore("Takes ~4 hours to run on a fast machine!!")
+  public void test2BTerms() throws IOException {
+
+    final long TERM_COUNT = ((long) Integer.MAX_VALUE) + 100000000;
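+    // ~2.25 billion terms in total, deliberately just past the signed 32-bit limit.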
+
+    final int TERMS_PER_DOC = _TestUtil.nextInt(random, 100000, 1000000);
+
+    List<String> savedTerms = null;
+
+    MockDirectoryWrapper dir = newFSDirectory(_TestUtil.getTempDir("2BTerms"));
+    dir.setThrottling(MockDirectoryWrapper.Throttling.NEVER);
+    dir.setCheckIndexOnClose(false); // don't double-checkindex
+    //Directory dir = newFSDirectory(new File("/p/lucene/indices/2bindex"));
+
+    if (true) {
+
+      IndexWriter w = new IndexWriter(dir,
+                                      new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
+                                      .setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH)
+                                      .setRAMBufferSizeMB(256.0)
+                                      .setMergeScheduler(new ConcurrentMergeScheduler())
+                                      .setMergePolicy(newLogMergePolicy(false, 10))
+                                      .setOpenMode(IndexWriterConfig.OpenMode.CREATE));
+
+      MergePolicy mp = w.getConfig().getMergePolicy();
+      if (mp instanceof LogByteSizeMergePolicy) {
+        // 1 petabyte:
+        ((LogByteSizeMergePolicy) mp).setMaxMergeMB(1024*1024*1024);
+      }
+
+      Document doc = new Document();
+
+      final MyTokenStream ts = new MyTokenStream(TERMS_PER_DOC);
+      Field field = new Field("field", ts);
+      field.setIndexOptions(IndexOptions.DOCS_ONLY);
+      field.setOmitNorms(true);
+      doc.add(field);
+      //w.setInfoStream(System.out);
+      final int numDocs = (int) (TERM_COUNT/TERMS_PER_DOC);
+
+      System.out.println("TERMS_PER_DOC=" + TERMS_PER_DOC);
+      System.out.println("numDocs=" + numDocs);
+
+      for(int i=0;i<numDocs;i++) {
+        final long t0 = System.currentTimeMillis();
+        w.addDocument(doc);
+        System.out.println(i + " of " + numDocs + " " + (System.currentTimeMillis()-t0) + " msec");
+      }
+      savedTerms = ts.savedTerms;
+
+      System.out.println("TEST: optimize");
+      w.optimize();
+      System.out.println("TEST: close writer");
+      w.close();
+    }
+
+    System.out.println("TEST: open reader");
+    final IndexReader r = IndexReader.open(dir);
+    if (savedTerms == null) {
+      savedTerms = findTerms(r);
+    }
+    final int numSavedTerms = savedTerms.size();
+    final List<String> bigOrdTerms = new ArrayList<String>(savedTerms.subList(numSavedTerms-10, numSavedTerms));
+    System.out.println("TEST: test big ord terms...");
+    testSavedTerms(r, bigOrdTerms);
+    System.out.println("TEST: test all saved terms...");
+    testSavedTerms(r, savedTerms);
+    r.close();
+
+    System.out.println("TEST: now CheckIndex...");
+    CheckIndex.Status status = _TestUtil.checkIndex(dir);
+    final long tc = status.segmentInfos.get(0).termIndexStatus.termCount;
+    assertTrue("count " + tc + " is not > " + Integer.MAX_VALUE, tc > Integer.MAX_VALUE);
+    dir.close();
+  }
+
+  private List<String> findTerms(IndexReader r) throws IOException {
+    System.out.println("TEST: findTerms");
+    final TermEnum termEnum = r.terms();
+    final List<String> savedTerms = new ArrayList<String>();
+    int nextSave = _TestUtil.nextInt(random, 500000, 1000000);
+    while(termEnum.next()) {
+      if (--nextSave == 0) {
+        savedTerms.add(termEnum.term().text());
+        System.out.println("TEST: add " + termEnum.term());
+        nextSave = _TestUtil.nextInt(random, 500000, 1000000);
+      }
+    }
+    return savedTerms;
+  }
+
+  private String toHexString(String s) {
+    byte[] bytes;
+    try {
+      bytes = s.getBytes("UTF-8");
+    } catch (UnsupportedEncodingException uee) {
+      throw new RuntimeException(uee);
+    }
+    StringBuilder sb = new StringBuilder();
+    for(byte b : bytes) {
+      if (sb.length() > 0) {
+        sb.append(' ');
+      }
+      sb.append(Integer.toHexString(b&0xFF));
+    }
+    return sb.toString();
+  }
+
+  private void testSavedTerms(IndexReader r, List<String> terms) throws IOException {
+    System.out.println("TEST: run " + terms.size() + " terms on reader=" + r);
+    IndexSearcher s = new IndexSearcher(r);
+    Collections.shuffle(terms);
+    boolean failed = false;
+    for(int iter=0;iter<10*terms.size();iter++) {
+      final String term = terms.get(random.nextInt(terms.size()));
+      System.out.println("TEST: search " + term + " [" + toHexString(term) + "]");
+      final long t0 = System.currentTimeMillis();
+      final int count = s.search(new TermQuery(new Term("field", term)), 1).totalHits;
+      if (count <= 0) {
+        System.out.println("  FAILED: count=" + count);
+        failed = true;
+      }
+      final long t1 = System.currentTimeMillis();
+      System.out.println("  took " + (t1-t0) + " millis");
+
+      final TermEnum termEnum = r.terms(new Term("field", term));
+      final String text = termEnum.term().text();
+      if (!term.equals(text)) {
+        System.out.println("  FAILED: wrong term: got " + text + " [" + toHexString(text) + "]");
+        failed = true;
+      }
+    }
+    assertFalse(failed);
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestAddIndexes.java b/lucene/backwards/src/test/org/apache/lucene/index/TestAddIndexes.java
new file mode 100755
index 0000000..8838522
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestAddIndexes.java
@@ -0,0 +1,1048 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.io.FileNotFoundException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util._TestUtil;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.Field.Index;
+import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.document.Field.TermVector;
+import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.store.AlreadyClosedException;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.MockDirectoryWrapper;
+import org.apache.lucene.store.RAMDirectory;
+
+import org.apache.lucene.search.PhraseQuery;
+
+public class TestAddIndexes extends LuceneTestCase {
+  
+  public void testSimpleCase() throws IOException {
+    // main directory
+    Directory dir = newDirectory();
+    // two auxiliary directories
+    Directory aux = newDirectory();
+    Directory aux2 = newDirectory();
+
+    IndexWriter writer = null;
+
+    writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT,
+        new MockAnalyzer(random))
+        .setOpenMode(OpenMode.CREATE));
+    // add 100 documents
+    addDocs(writer, 100);
+    assertEquals(100, writer.maxDoc());
+    writer.close();
+
+    writer = newWriter(
+        aux,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+            setOpenMode(OpenMode.CREATE).
+            setMergePolicy(newLogMergePolicy(false))
+    );
+    // add 40 documents in separate files
+    addDocs(writer, 40);
+    assertEquals(40, writer.maxDoc());
+    writer.close();
+
+    writer = newWriter(aux2, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE));
+    // add 50 documents in compound files
+    addDocs2(writer, 50);
+    assertEquals(50, writer.maxDoc());
+    writer.close();
+
+    // test doc count before segments are merged
+    writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
+    assertEquals(100, writer.maxDoc());
+    writer.addIndexes(new Directory[] { aux, aux2 });
+    assertEquals(190, writer.maxDoc());
+    writer.close();
+
+    // make sure the old index is correct
+    verifyNumDocs(aux, 40);
+
+    // make sure the new index is correct
+    verifyNumDocs(dir, 190);
+
+    // now add another set in.
+    Directory aux3 = newDirectory();
+    writer = newWriter(aux3, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    // add 40 documents
+    addDocs(writer, 40);
+    assertEquals(40, writer.maxDoc());
+    writer.close();
+
+    // test doc count before segments are merged/index is optimized
+    writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
+    assertEquals(190, writer.maxDoc());
+    writer.addIndexes(new Directory[] { aux3 });
+    assertEquals(230, writer.maxDoc());
+    writer.close();
+
+    // make sure the new index is correct
+    verifyNumDocs(dir, 230);
+
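+    // 180 docs contain "aaa" (the 100 + 40 + 40 added via addDocs); 50 contain "bbb"
+    // (added via addDocs2).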
+    verifyTermDocs(dir, new Term("content", "aaa"), 180);
+
+    verifyTermDocs(dir, new Term("content", "bbb"), 50);
+
+    // now optimize it.
+    writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
+    writer.optimize();
+    writer.close();
+
+    // make sure the new index is correct
+    verifyNumDocs(dir, 230);
+
+    verifyTermDocs(dir, new Term("content", "aaa"), 180);
+
+    verifyTermDocs(dir, new Term("content", "bbb"), 50);
+
+    // now add a single document
+    Directory aux4 = newDirectory();
+    writer = newWriter(aux4, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    addDocs2(writer, 1);
+    writer.close();
+
+    writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
+    assertEquals(230, writer.maxDoc());
+    writer.addIndexes(new Directory[] { aux4 });
+    assertEquals(231, writer.maxDoc());
+    writer.close();
+
+    verifyNumDocs(dir, 231);
+
+    verifyTermDocs(dir, new Term("content", "bbb"), 51);
+    dir.close();
+    aux.close();
+    aux2.close();
+    aux3.close();
+    aux4.close();
+  }
+
+  public void testWithPendingDeletes() throws IOException {
+    // main directory
+    Directory dir = newDirectory();
+    // auxiliary directory
+    Directory aux = newDirectory();
+
+    setUpDirs(dir, aux);
+    IndexWriter writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
+    writer.setInfoStream(VERBOSE ? System.out : null);
+    writer.setInfoStream(VERBOSE ? System.out : null);
+    writer.addIndexes(aux);
+
+    // Adds 10 docs, then replaces them with another 10
+    // docs, so 10 pending deletes:
+    for (int i = 0; i < 20; i++) {
+      Document doc = new Document();
+      doc.add(newField("id", "" + (i % 10), Field.Store.NO, Field.Index.NOT_ANALYZED));
+      doc.add(newField("content", "bbb " + i, Field.Store.NO,
+                        Field.Index.ANALYZED));
+      writer.updateDocument(new Term("id", "" + (i%10)), doc);
+    }
+    // Deletes one of the 10 added docs, leaving 9:
+    PhraseQuery q = new PhraseQuery();
+    q.add(new Term("content", "bbb"));
+    q.add(new Term("content", "14"));
+    writer.deleteDocuments(q);
+
+    writer.optimize();
+    writer.commit();
+
+    verifyNumDocs(dir, 1039);
+    verifyTermDocs(dir, new Term("content", "aaa"), 1030);
+    verifyTermDocs(dir, new Term("content", "bbb"), 9);
+
+    writer.close();
+    dir.close();
+    aux.close();
+  }
+
+  public void testWithPendingDeletes2() throws IOException {
+    // main directory
+    Directory dir = newDirectory();
+    // auxiliary directory
+    Directory aux = newDirectory();
+
+    setUpDirs(dir, aux);
+    IndexWriter writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
+    // Adds 10 docs, then replaces them with another 10
+    // docs, so 10 pending deletes:
+    for (int i = 0; i < 20; i++) {
+      Document doc = new Document();
+      doc.add(newField("id", "" + (i % 10), Field.Store.NO, Field.Index.NOT_ANALYZED));
+      doc.add(newField("content", "bbb " + i, Field.Store.NO, Field.Index.ANALYZED));
+      writer.updateDocument(new Term("id", "" + (i%10)), doc);
+    }
+    
+    writer.addIndexes(new Directory[] {aux});
+    
+    // Deletes one of the 10 added docs, leaving 9:
+    PhraseQuery q = new PhraseQuery();
+    q.add(new Term("content", "bbb"));
+    q.add(new Term("content", "14"));
+    writer.deleteDocuments(q);
+
+    writer.optimize();
+    writer.commit();
+
+    verifyNumDocs(dir, 1039);
+    verifyTermDocs(dir, new Term("content", "aaa"), 1030);
+    verifyTermDocs(dir, new Term("content", "bbb"), 9);
+
+    writer.close();
+    dir.close();
+    aux.close();
+  }
+
+  public void testWithPendingDeletes3() throws IOException {
+    // main directory
+    Directory dir = newDirectory();
+    // auxiliary directory
+    Directory aux = newDirectory();
+
+    setUpDirs(dir, aux);
+    IndexWriter writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
+
+    // Adds 10 docs, then replaces them with another 10
+    // docs, so 10 pending deletes:
+    for (int i = 0; i < 20; i++) {
+      Document doc = new Document();
+      doc.add(newField("id", "" + (i % 10), Field.Store.NO, Field.Index.NOT_ANALYZED));
+      doc.add(newField("content", "bbb " + i, Field.Store.NO,
+                        Field.Index.ANALYZED));
+      writer.updateDocument(new Term("id", "" + (i%10)), doc);
+    }
+
+    // Deletes one of the 10 added docs, leaving 9:
+    PhraseQuery q = new PhraseQuery();
+    q.add(new Term("content", "bbb"));
+    q.add(new Term("content", "14"));
+    writer.deleteDocuments(q);
+
+    writer.addIndexes(new Directory[] {aux});
+
+    writer.optimize();
+    writer.commit();
+
+    verifyNumDocs(dir, 1039);
+    verifyTermDocs(dir, new Term("content", "aaa"), 1030);
+    verifyTermDocs(dir, new Term("content", "bbb"), 9);
+
+    writer.close();
+    dir.close();
+    aux.close();
+  }
+
+  // case 0: add self or exceed maxMergeDocs, expect exception
+  public void testAddSelf() throws IOException {
+    // main directory
+    Directory dir = newDirectory();
+    // auxiliary directory
+    Directory aux = newDirectory();
+
+    IndexWriter writer = null;
+
+    writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    // add 100 documents
+    addDocs(writer, 100);
+    assertEquals(100, writer.maxDoc());
+    writer.close();
+
+    writer = newWriter(
+        aux,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+            setOpenMode(OpenMode.CREATE).
+            setMaxBufferedDocs(1000).
+            setMergePolicy(newLogMergePolicy(false))
+    );
+    // add 140 documents in separate files
+    addDocs(writer, 40);
+    writer.close();
+    writer = newWriter(
+        aux,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+            setOpenMode(OpenMode.CREATE).
+            setMaxBufferedDocs(1000).
+            setMergePolicy(newLogMergePolicy(false))
+    );
+    addDocs(writer, 100);
+    writer.close();
+
+    writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
+    try {
+      // cannot add self
+      writer.addIndexes(new Directory[] { aux, dir });
+      fail("expected IllegalArgumentException: cannot addIndexes a writer's own directory");
+    } catch (IllegalArgumentException e) {
+      assertEquals(100, writer.maxDoc());
+    }
+    writer.close();
+
+    // make sure the index is correct
+    verifyNumDocs(dir, 100);
+    dir.close();
+    aux.close();
+  }
+
+  // in all the remaining tests, make the doc count of the oldest segment
+  // in dir large so that it is never merged in addIndexes()
+  // case 1: no tail segments
+  public void testNoTailSegments() throws IOException {
+    // main directory
+    Directory dir = newDirectory();
+    // auxiliary directory
+    Directory aux = newDirectory();
+
+    setUpDirs(dir, aux);
+
+    IndexWriter writer = newWriter(
+        dir,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+            setOpenMode(OpenMode.APPEND).
+            setMaxBufferedDocs(10).
+            setMergePolicy(newLogMergePolicy(4))
+    );
+    addDocs(writer, 10);
+
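+    // dir holds 1000 docs from setUpDirs plus the 10 just added; aux contributes 30 more, so maxDoc is 1040: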
+    writer.addIndexes(new Directory[] { aux });
+    assertEquals(1040, writer.maxDoc());
+    assertEquals(1000, writer.getDocCount(0));
+    writer.close();
+
+    // make sure the index is correct
+    verifyNumDocs(dir, 1040);
+    dir.close();
+    aux.close();
+  }
+
+  // case 2: tail segments, invariants hold, no copy
+  public void testNoCopySegments() throws IOException {
+    // main directory
+    Directory dir = newDirectory();
+    // auxiliary directory
+    Directory aux = newDirectory();
+
+    setUpDirs(dir, aux);
+
+    IndexWriter writer = newWriter(
+        dir,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+            setOpenMode(OpenMode.APPEND).
+            setMaxBufferedDocs(9).
+            setMergePolicy(newLogMergePolicy(4))
+    );
+    addDocs(writer, 2);
+
+    writer.addIndexes(new Directory[] { aux });
+    assertEquals(1032, writer.maxDoc());
+    assertEquals(1000, writer.getDocCount(0));
+    writer.close();
+
+    // make sure the index is correct
+    verifyNumDocs(dir, 1032);
+    dir.close();
+    aux.close();
+  }
+
+  // case 3: tail segments, invariants hold, copy, invariants hold
+  public void testNoMergeAfterCopy() throws IOException {
+    // main directory
+    Directory dir = newDirectory();
+    // auxiliary directory
+    Directory aux = newDirectory();
+
+    setUpDirs(dir, aux);
+
+    IndexWriter writer = newWriter(
+        dir,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+            setOpenMode(OpenMode.APPEND).
+            setMaxBufferedDocs(10).
+            setMergePolicy(newLogMergePolicy(4))
+    );
+
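+    // aux (30 docs) is added twice, once directly and once via an in-memory copy, so 1000 + 60 = 1060: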
+    writer.addIndexes(new Directory[] { aux, new MockDirectoryWrapper(random, new RAMDirectory(aux)) });
+    assertEquals(1060, writer.maxDoc());
+    assertEquals(1000, writer.getDocCount(0));
+    writer.close();
+
+    // make sure the index is correct
+    verifyNumDocs(dir, 1060);
+    dir.close();
+    aux.close();
+  }
+
+  // case 4: tail segments, invariants hold, copy, then invariants no longer hold
+  public void testMergeAfterCopy() throws IOException {
+    // main directory
+    Directory dir = newDirectory();
+    // auxiliary directory
+    Directory aux = newDirectory();
+
+    setUpDirs(dir, aux);
+
+    IndexReader reader = IndexReader.open(aux, false);
+    for (int i = 0; i < 20; i++) {
+      reader.deleteDocument(i);
+    }
+    assertEquals(10, reader.numDocs());
+    reader.close();
+
+    IndexWriter writer = newWriter(
+        dir,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+            setOpenMode(OpenMode.APPEND).
+            setMaxBufferedDocs(4).
+            setMergePolicy(newLogMergePolicy(4))
+    );
+
+    writer.addIndexes(new Directory[] { aux, new MockDirectoryWrapper(random, new RAMDirectory(aux)) });
+    assertEquals(1020, writer.maxDoc());
+    assertEquals(1000, writer.getDocCount(0));
+    writer.close();
+    dir.close();
+    aux.close();
+  }
+
+  // case 5: tail segments, invariants do not hold
+  public void testMoreMerges() throws IOException {
+    // main directory
+    Directory dir = newDirectory();
+    // auxiliary directory
+    Directory aux = newDirectory();
+    Directory aux2 = newDirectory();
+
+    setUpDirs(dir, aux);
+
+    IndexWriter writer = newWriter(
+        aux2,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+            setOpenMode(OpenMode.CREATE).
+            setMaxBufferedDocs(100).
+            setMergePolicy(newLogMergePolicy(10))
+    );
+    writer.setInfoStream(VERBOSE ? System.out : null);
+    writer.addIndexes(aux);
+    assertEquals(30, writer.maxDoc());
+    writer.close();
+
+    IndexReader reader = IndexReader.open(aux, false);
+    for (int i = 0; i < 27; i++) {
+      reader.deleteDocument(i);
+    }
+    assertEquals(3, reader.numDocs());
+    reader.close();
+
+    reader = IndexReader.open(aux2, false);
+    for (int i = 0; i < 8; i++) {
+      reader.deleteDocument(i);
+    }
+    assertEquals(22, reader.numDocs());
+    reader.close();
+
+    writer = newWriter(
+        dir,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+            setOpenMode(OpenMode.APPEND).
+            setMaxBufferedDocs(6).
+            setMergePolicy(newLogMergePolicy(4))
+    );
+
+    writer.addIndexes(new Directory[] { aux, aux2 });
+    assertEquals(1040, writer.maxDoc());
+    assertEquals(1000, writer.getDocCount(0));
+    writer.close();
+    dir.close();
+    aux.close();
+    aux2.close();
+  }
+
+  private IndexWriter newWriter(Directory dir, IndexWriterConfig conf)
+      throws IOException {
+    conf.setMergePolicy(new LogDocMergePolicy());
+    final IndexWriter writer = new IndexWriter(dir, conf);
+    return writer;
+  }
+
+  private void addDocs(IndexWriter writer, int numDocs) throws IOException {
+    for (int i = 0; i < numDocs; i++) {
+      Document doc = new Document();
+      doc.add(newField("content", "aaa", Field.Store.NO,
+                        Field.Index.ANALYZED));
+      writer.addDocument(doc);
+    }
+  }
+
+  private void addDocs2(IndexWriter writer, int numDocs) throws IOException {
+    for (int i = 0; i < numDocs; i++) {
+      Document doc = new Document();
+      doc.add(newField("content", "bbb", Field.Store.NO,
+                        Field.Index.ANALYZED));
+      writer.addDocument(doc);
+    }
+  }
+
+  private void verifyNumDocs(Directory dir, int numDocs) throws IOException {
+    IndexReader reader = IndexReader.open(dir, true);
+    assertEquals(numDocs, reader.maxDoc());
+    assertEquals(numDocs, reader.numDocs());
+    reader.close();
+  }
+
+  private void verifyTermDocs(Directory dir, Term term, int numDocs)
+      throws IOException {
+    IndexReader reader = IndexReader.open(dir, true);
+    TermDocs termDocs = reader.termDocs(term);
+    int count = 0;
+    while (termDocs.next())
+      count++;
+    assertEquals(numDocs, count);
+    reader.close();
+  }
+
+  private void setUpDirs(Directory dir, Directory aux) throws IOException {
+    IndexWriter writer = null;
+
+    writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(1000));
+    // add 1000 documents in 1 segment
+    addDocs(writer, 1000);
+    assertEquals(1000, writer.maxDoc());
+    assertEquals(1, writer.getSegmentCount());
+    writer.close();
+
+    writer = newWriter(
+        aux,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+            setOpenMode(OpenMode.CREATE).
+            setMaxBufferedDocs(1000).
+            setMergePolicy(newLogMergePolicy(false, 10))
+    );
+    // add 30 documents in 3 segments
+    for (int i = 0; i < 3; i++) {
+      addDocs(writer, 10);
+      writer.close();
+      writer = newWriter(
+          aux,
+          newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+              setOpenMode(OpenMode.APPEND).
+              setMaxBufferedDocs(1000).
+              setMergePolicy(newLogMergePolicy(false, 10))
+      );
+    }
+    assertEquals(30, writer.maxDoc());
+    assertEquals(3, writer.getSegmentCount());
+    writer.close();
+  }
+
+  // LUCENE-1270
+  public void testHangOnClose() throws IOException {
+
+    Directory dir = newDirectory();
+    LogByteSizeMergePolicy lmp = new LogByteSizeMergePolicy();
+    lmp.setUseCompoundFile(false);
+    lmp.setMergeFactor(100);
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
+        TEST_VERSION_CURRENT, new MockAnalyzer(random))
+        .setMaxBufferedDocs(5).setMergePolicy(lmp));
+
+    Document doc = new Document();
+    doc.add(newField("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES,
+                      Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+    for(int i=0;i<60;i++)
+      writer.addDocument(doc);
+
+    Document doc2 = new Document();
+    doc2.add(newField("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES,
+                      Field.Index.NO));
+    doc2.add(newField("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES,
+                      Field.Index.NO));
+    doc2.add(newField("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES,
+                      Field.Index.NO));
+    doc2.add(newField("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES,
+                      Field.Index.NO));
+    for(int i=0;i<10;i++)
+      writer.addDocument(doc2);
+    writer.close();
+
+    Directory dir2 = newDirectory();
+    lmp = new LogByteSizeMergePolicy();
+    lmp.setMinMergeMB(0.0001);
+    lmp.setUseCompoundFile(false);
+    lmp.setMergeFactor(4);
+    writer = new IndexWriter(dir2, newIndexWriterConfig(TEST_VERSION_CURRENT,
+        new MockAnalyzer(random))
+        .setMergeScheduler(new SerialMergeScheduler()).setMergePolicy(lmp));
+    writer.addIndexes(new Directory[] {dir});
+    writer.close();
+    dir.close();
+    dir2.close();
+  }
+
+  // TODO: these are also in TestIndexWriter... add a simple doc-writing method
+  // like this to LuceneTestCase?
+  private void addDoc(IndexWriter writer) throws IOException {
+    Document doc = new Document();
+    doc.add(newField("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
+    writer.addDocument(doc);
+  }
+  
+  private abstract class RunAddIndexesThreads {
+
+    Directory dir, dir2;
+    final static int NUM_INIT_DOCS = 17;
+    IndexWriter writer2;
+    final List<Throwable> failures = new ArrayList<Throwable>();
+    volatile boolean didClose;
+    final IndexReader[] readers;
+    final int NUM_COPY;
+    final static int NUM_THREADS = 5;
+    final Thread[] threads = new Thread[NUM_THREADS];
+
+    public RunAddIndexesThreads(int numCopy) throws Throwable {
+      NUM_COPY = numCopy;
+      dir = new MockDirectoryWrapper(random, new RAMDirectory());
+      IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
+          TEST_VERSION_CURRENT, new MockAnalyzer(random))
+          .setMaxBufferedDocs(2));
+      for (int i = 0; i < NUM_INIT_DOCS; i++)
+        addDoc(writer);
+      writer.close();
+
+      dir2 = newDirectory();
+      writer2 = new IndexWriter(dir2, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+      writer2.setInfoStream(VERBOSE ? System.out : null);
+      writer2.commit();
+
+      readers = new IndexReader[NUM_COPY];
+      for(int i=0;i<NUM_COPY;i++)
+        readers[i] = IndexReader.open(dir, true);
+    }
+
+    void launchThreads(final int numIter) {
+
+      for(int i=0;i<NUM_THREADS;i++) {
+        threads[i] = new Thread() {
+            @Override
+            public void run() {
+              try {
+
+                final Directory[] dirs = new Directory[NUM_COPY];
+                for(int k=0;k<NUM_COPY;k++)
+                  dirs[k] = new MockDirectoryWrapper(random, new RAMDirectory(dir));
+
+                int j=0;
+
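+                // A non-positive numIter means loop until an exception
+                // (e.g. from the writer being closed) stops this thread: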
+                while(true) {
+                  // System.out.println(Thread.currentThread().getName() + ": iter j=" + j);
+                  if (numIter > 0 && j == numIter)
+                    break;
+                  doBody(j++, dirs);
+                }
+              } catch (Throwable t) {
+                handle(t);
+              }
+            }
+          };
+      }
+
+      for(int i=0;i<NUM_THREADS;i++)
+        threads[i].start();
+    }
+
+    void joinThreads() throws Exception {
+      for(int i=0;i<NUM_THREADS;i++)
+        threads[i].join();
+    }
+
+    void close(boolean doWait) throws Throwable {
+      didClose = true;
+      writer2.close(doWait);
+    }
+
+    void closeDir() throws Throwable {
+      for(int i=0;i<NUM_COPY;i++)
+        readers[i].close();
+      dir2.close();
+    }
+
+    abstract void doBody(int j, Directory[] dirs) throws Throwable;
+    abstract void handle(Throwable t);
+  }
+
+  private class CommitAndAddIndexes extends RunAddIndexesThreads {
+    public CommitAndAddIndexes(int numCopy) throws Throwable {
+      super(numCopy);
+    }
+
+    @Override
+    void handle(Throwable t) {
+      t.printStackTrace(System.out);
+      synchronized(failures) {
+        failures.add(t);
+      }
+    }
+
+    @Override
+    void doBody(int j, Directory[] dirs) throws Throwable {
+      switch(j%5) {
+      case 0:
+        if (VERBOSE) {
+          System.out.println(Thread.currentThread().getName() + ": TEST: addIndexes(Dir[]) then optimize");
+        }
+        writer2.addIndexes(dirs);
+        writer2.optimize();
+        break;
+      case 1:
+        if (VERBOSE) {
+          System.out.println(Thread.currentThread().getName() + ": TEST: addIndexes(Dir[])");
+        }
+        writer2.addIndexes(dirs);
+        break;
+      case 2:
+        if (VERBOSE) {
+          System.out.println(Thread.currentThread().getName() + ": TEST: addIndexes(IndexReader[])");
+        }
+        writer2.addIndexes(readers);
+        break;
+      case 3:
+        if (VERBOSE) {
+          System.out.println(Thread.currentThread().getName() + ": TEST: addIndexes(Dir[]) then maybeMerge");
+        }
+        writer2.addIndexes(dirs);
+        writer2.maybeMerge();
+        break;
+      case 4:
+        if (VERBOSE) {
+          System.out.println(Thread.currentThread().getName() + ": TEST: commit");
+        }
+        writer2.commit();
+      }
+    }
+  }
+  
+  // LUCENE-1335: test simultaneous addIndexes & commits
+  // from multiple threads
+  public void testAddIndexesWithThreads() throws Throwable {
+
+    final int NUM_ITER = TEST_NIGHTLY ? 15 : 5;
+    final int NUM_COPY = 3;
+    CommitAndAddIndexes c = new CommitAndAddIndexes(NUM_COPY);
+    c.writer2.setInfoStream(VERBOSE ? System.out : null);
+    c.launchThreads(NUM_ITER);
+
+    for(int i=0;i<100;i++)
+      addDoc(c.writer2);
+
+    c.joinThreads();
+
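+    // Each of NUM_THREADS threads runs NUM_ITER iterations; 4 of every 5 call addIndexes
+    // with NUM_COPY copies of the NUM_INIT_DOCS-doc source index, and the 5th just commits: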
+    int expectedNumDocs = 100+NUM_COPY*(4*NUM_ITER/5)*RunAddIndexesThreads.NUM_THREADS*RunAddIndexesThreads.NUM_INIT_DOCS;
+    assertEquals(expectedNumDocs, c.writer2.numDocs());
+
+    c.close(true);
+
+    assertTrue(c.failures.size() == 0);
+
+    IndexReader reader = IndexReader.open(c.dir2, true);
+    assertEquals(expectedNumDocs, reader.numDocs());
+    reader.close();
+
+    c.closeDir();
+  }
+
+  private class CommitAndAddIndexes2 extends CommitAndAddIndexes {
+    public CommitAndAddIndexes2(int numCopy) throws Throwable {
+      super(numCopy);
+    }
+
+    @Override
+    void handle(Throwable t) {
+      if (!(t instanceof AlreadyClosedException) && !(t instanceof NullPointerException)) {
+        t.printStackTrace(System.out);
+        synchronized(failures) {
+          failures.add(t);
+        }
+      }
+    }
+  }
+
+  // LUCENE-1335: test simultaneous addIndexes & close
+  public void testAddIndexesWithClose() throws Throwable {
+    final int NUM_COPY = 3;
+    CommitAndAddIndexes2 c = new CommitAndAddIndexes2(NUM_COPY);
+    //c.writer2.setInfoStream(System.out);
+    c.launchThreads(-1);
+
+    // Close w/o first stopping/joining the threads
+    c.close(true);
+    //c.writer2.close();
+
+    c.joinThreads();
+
+    c.closeDir();
+
+    assertTrue(c.failures.size() == 0);
+  }
+
+  private class CommitAndAddIndexes3 extends RunAddIndexesThreads {
+    public CommitAndAddIndexes3(int numCopy) throws Throwable {
+      super(numCopy);
+    }
+
+    @Override
+    void doBody(int j, Directory[] dirs) throws Throwable {
+      switch(j%5) {
+      case 0:
+        if (VERBOSE) {
+          System.out.println("TEST: " + Thread.currentThread().getName() + ": addIndexes + optimize");
+        }
+        writer2.addIndexes(dirs);
+        writer2.optimize();
+        break;
+      case 1:
+        if (VERBOSE) {
+          System.out.println("TEST: " + Thread.currentThread().getName() + ": addIndexes");
+        }
+        writer2.addIndexes(dirs);
+        break;
+      case 2:
+        if (VERBOSE) {
+          System.out.println("TEST: " + Thread.currentThread().getName() + ": addIndexes(IR[])");
+        }
+        writer2.addIndexes(readers);
+        break;
+      case 3:
+        if (VERBOSE) {
+          System.out.println("TEST: " + Thread.currentThread().getName() + ": optimize");
+        }
+        writer2.optimize();
+        break;
+      case 4:
+        if (VERBOSE) {
+          System.out.println("TEST: " + Thread.currentThread().getName() + ": commit");
+        }
+        writer2.commit();
+      }
+    }
+
+    @Override
+    void handle(Throwable t) {
+      boolean report = true;
+
+      if (t instanceof AlreadyClosedException || t instanceof MergePolicy.MergeAbortedException || t instanceof NullPointerException) {
+        report = !didClose;
+      } else if (t instanceof FileNotFoundException)  {
+        report = !didClose;
+      } else if (t instanceof IOException)  {
+        Throwable t2 = t.getCause();
+        if (t2 instanceof MergePolicy.MergeAbortedException) {
+          report = !didClose;
+        }
+      }
+      if (report) {
+        t.printStackTrace(System.out);
+        synchronized(failures) {
+          failures.add(t);
+        }
+      }
+    }
+  }
+
+  // LUCENE-1335: test simultaneous addIndexes & close
+  public void testAddIndexesWithCloseNoWait() throws Throwable {
+
+    final int NUM_COPY = 50;
+    CommitAndAddIndexes3 c = new CommitAndAddIndexes3(NUM_COPY);
+    if (VERBOSE) {
+      c.writer2.setInfoStream(System.out);
+    }
+    c.launchThreads(-1);
+
+    Thread.sleep(_TestUtil.nextInt(random, 10, 500));
+
+    // Close w/o first stopping/joining the threads
+    if (VERBOSE) {
+      System.out.println("TEST: now close(false)");
+    }
+    c.close(false);
+
+    c.joinThreads();
+
+    if (VERBOSE) {
+      System.out.println("TEST: done join threads");
+    }
+    c.closeDir();
+
+    assertTrue(c.failures.size() == 0);
+  }
+
+  // LUCENE-1335: test simultaneous addIndexes & close
+  public void testAddIndexesWithRollback() throws Throwable {
+
+    final int NUM_COPY = TEST_NIGHTLY ? 50 : 5;
+    CommitAndAddIndexes3 c = new CommitAndAddIndexes3(NUM_COPY);
+    c.launchThreads(-1);
+
+    Thread.sleep(_TestUtil.nextInt(random, 10, 500));
+
+    // Close w/o first stopping/joining the threads
+    if (VERBOSE) {
+      System.out.println("TEST: now force rollback");
+    }
+    c.didClose = true;
+    c.writer2.rollback();
+
+    c.joinThreads();
+
+    c.closeDir();
+
+    assertTrue(c.failures.size() == 0);
+  }
+
+  // LUCENE-2790: tests that the non-CFS files were deleted by addIndexes
+  public void testNonCFSLeftovers() throws Exception {
+    Directory[] dirs = new Directory[2];
+    for (int i = 0; i < dirs.length; i++) {
+      dirs[i] = new RAMDirectory();
+      IndexWriter w = new IndexWriter(dirs[i], new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+      Document d = new Document();
+      d.add(new Field("c", "v", Store.YES, Index.ANALYZED, TermVector.YES));
+      w.addDocument(d);
+      w.close();
+    }
+    
+    IndexReader[] readers = new IndexReader[] { IndexReader.open(dirs[0]), IndexReader.open(dirs[1]) };
+    
+    Directory dir = new RAMDirectory();
+    IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy());
+    LogMergePolicy lmp = (LogMergePolicy) conf.getMergePolicy();
+    lmp.setNoCFSRatio(1.0); // Force creation of CFS
+    lmp.setUseCompoundFile(true);
+    IndexWriter w3 = new IndexWriter(dir, conf);
+    w3.addIndexes(readers);
+    w3.close();
+    
+    assertEquals("Only one compound segment should exist", 3, dir.listAll().length);
+  }
+ 
+  // LUCENE-2996: tests that addIndexes(IndexReader) applies existing deletes correctly.
+  public void testExistingDeletes() throws Exception {
+    Directory[] dirs = new Directory[2];
+    for (int i = 0; i < dirs.length; i++) {
+      dirs[i] = newDirectory();
+      IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
+      IndexWriter writer = new IndexWriter(dirs[i], conf);
+      Document doc = new Document();
+      doc.add(new Field("id", "myid", Store.NO, Index.NOT_ANALYZED_NO_NORMS));
+      writer.addDocument(doc);
+      writer.close();
+    }
+
+    IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
+    IndexWriter writer = new IndexWriter(dirs[0], conf);
+
+    // Now delete the document
+    writer.deleteDocuments(new Term("id", "myid"));
+    IndexReader r = IndexReader.open(dirs[1]);
+    try {
+      writer.addIndexes(r);
+    } finally {
+      r.close();
+    }
+    writer.commit();
+    assertEquals("Documents from the incoming index should not have been deleted", 1, writer.numDocs());
+    writer.close();
+
+    for (Directory dir : dirs) {
+      dir.close();
+    }
+
+  }
+  
+  // LUCENE-3126: tests that if a non-CFS segment is copied, it is converted to
+  // a CFS, given MP preferences
+  public void testCopyIntoCFS() throws Exception {
+    // create an index, no CFS (so we can assert that existing segments are not affected)
+    Directory target = newDirectory();
+    LogMergePolicy lmp = newLogMergePolicy(false);
+    IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, null).setMergePolicy(lmp);
+    IndexWriter w = new IndexWriter(target, conf);
+    w.addDocument(new Document());
+    w.commit();
+    assertFalse(w.segmentInfos.info(0).getUseCompoundFile());
+
+    // prepare second index, no-CFS too + .del file + separate norms file
+    Directory src = newDirectory();
+    LogMergePolicy lmp2 = newLogMergePolicy(false);
+    IndexWriterConfig conf2 = newIndexWriterConfig(TEST_VERSION_CURRENT,
+        new MockAnalyzer(random)).setMergePolicy(lmp2);
+    IndexWriter w2 = new IndexWriter(src, conf2);
+    Document doc = new Document();
+    doc.add(new Field("c", "some text", Store.YES, Index.ANALYZED));
+    w2.addDocument(doc);
+    doc = new Document();
+    doc.add(new Field("d", "delete", Store.NO, Index.NOT_ANALYZED_NO_NORMS));
+    w2.addDocument(doc);
+    w2.commit();
+    w2.deleteDocuments(new Term("d", "delete"));
+    w2.commit();
+    w2.close();
+
+    // create separate norms file
+    IndexReader r = IndexReader.open(src, false);
+    r.setNorm(0, "c", (byte) 1);
+    r.close();
+    assertTrue(".del file not found", src.fileExists("_0_1.del"));
+    assertTrue("separate norms file not found", src.fileExists("_0_1.s0"));
+    
+    // Case 1: force 'CFS' on target
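+    // (useCompoundFile=true plus noCFSRatio=1.0 makes the merge policy use CFS regardless of segment size)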
+    lmp.setUseCompoundFile(true);
+    lmp.setNoCFSRatio(1.0);
+    w.addIndexes(src);
+    w.commit();
+    assertFalse("existing segments should not be modified by addIndexes", w.segmentInfos.info(0).getUseCompoundFile());
+    assertTrue("segment should have been converted to a CFS by addIndexes", w.segmentInfos.info(1).getUseCompoundFile());
+    assertTrue(".del file not found", target.fileExists("_1_1.del"));
+    assertTrue("separate norms file not found", target.fileExists("_1_1.s0"));
+
+    // Case 2: LMP disallows CFS
+    lmp.setUseCompoundFile(false);
+    w.addIndexes(src);
+    w.commit();
+    assertFalse("segment should not have been converted to a CFS by addIndexes if MP disallows", w.segmentInfos.info(2).getUseCompoundFile());
+
+    w.close();
+    
+    // cleanup
+    src.close();
+    target.close();
+  }
+
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestAllZerosSegmentsFile.java b/lucene/backwards/src/test/org/apache/lucene/index/TestAllZerosSegmentsFile.java
new file mode 100644
index 0000000..b88f171
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestAllZerosSegmentsFile.java
@@ -0,0 +1,47 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.IndexOutput;
+import org.apache.lucene.util.LuceneTestCase;
+
+public class TestAllZerosSegmentsFile extends LuceneTestCase {
+
+  public void test() throws Exception {
+    Directory dir = newDirectory();
+    RandomIndexWriter w = new RandomIndexWriter(random, dir);
+    w.addDocument(new Document());
+    w.close();
+
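+    // Write an all-zeros file where the next segments_N generation would go,
+    // simulating an incomplete commit; the index should still open from the
+    // last good commit: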
+    String nextSegmentsFile = IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS,
+                                                                    "",
+                                                                    SegmentInfos.getCurrentSegmentGeneration(dir)+1);
+    IndexOutput out = dir.createOutput(nextSegmentsFile);
+    for(int idx=0;idx<8;idx++) {
+      out.writeByte((byte) 0);
+    }
+    out.close();
+
+    IndexReader r = IndexReader.open(dir, true);
+    assertEquals(r.numDocs(), 1);
+    r.close();
+    dir.close();
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestAtomicUpdate.java b/lucene/backwards/src/test/org/apache/lucene/index/TestAtomicUpdate.java
new file mode 100644
index 0000000..d1264cb
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestAtomicUpdate.java
@@ -0,0 +1,204 @@
+package org.apache.lucene.index;
+
+/**
+ * Copyright 2004 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.util.*;
+import org.apache.lucene.store.*;
+import org.apache.lucene.document.*;
+import org.apache.lucene.analysis.MockAnalyzer;
+
+import java.util.Random;
+import java.io.File;
+import java.io.IOException;
+
+public class TestAtomicUpdate extends LuceneTestCase {
+  
+  private static final class MockIndexWriter extends IndexWriter {
+
+    static Random RANDOM;
+
+    public MockIndexWriter(Directory dir, IndexWriterConfig conf) throws IOException {
+      super(dir, conf);
+    }
+
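+    // Randomly yield at IndexWriter's internal test points to provoke different thread interleavings: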
+    @Override
+    boolean testPoint(String name) {
+      //      if (name.equals("startCommit")) {
+      if (RANDOM.nextInt(4) == 2)
+        Thread.yield();
+      return true;
+    }
+  }
+
+  private static abstract class TimedThread extends Thread {
+    volatile boolean failed;
+    int count;
+    private static float RUN_TIME_MSEC = atLeast(500);
+    private TimedThread[] allThreads;
+
+    abstract public void doWork() throws Throwable;
+
+    TimedThread(TimedThread[] threads) {
+      this.allThreads = threads;
+    }
+
+    @Override
+    public void run() {
+      final long stopTime = System.currentTimeMillis() + (long) RUN_TIME_MSEC;
+
+      count = 0;
+
+      try {
+        do {
+          if (anyErrors()) break;
+          doWork();
+          count++;
+        } while(System.currentTimeMillis() < stopTime);
+      } catch (Throwable e) {
+        System.out.println(Thread.currentThread().getName() + ": exc");
+        e.printStackTrace(System.out);
+        failed = true;
+      }
+    }
+
+    private boolean anyErrors() {
+      for(int i=0;i<allThreads.length;i++)
+        if (allThreads[i] != null && allThreads[i].failed)
+          return true;
+      return false;
+    }
+  }
+
+  private static class IndexerThread extends TimedThread {
+    IndexWriter writer;
+    public IndexerThread(IndexWriter writer, TimedThread[] threads) {
+      super(threads);
+      this.writer = writer;
+    }
+
+    @Override
+    public void doWork() throws Exception {
+      // Update all 100 docs...
+      for(int i=0; i<100; i++) {
+        Document d = new Document();
+        d.add(new Field("id", Integer.toString(i), Field.Store.YES, Field.Index.NOT_ANALYZED));
+        d.add(new Field("contents", English.intToEnglish(i+10*count), Field.Store.NO, Field.Index.ANALYZED));
+        writer.updateDocument(new Term("id", Integer.toString(i)), d);
+      }
+    }
+  }
+
+  private static class SearcherThread extends TimedThread {
+    private Directory directory;
+
+    public SearcherThread(Directory directory, TimedThread[] threads) {
+      super(threads);
+      this.directory = directory;
+    }
+
+    @Override
+    public void doWork() throws Throwable {
+      IndexReader r = IndexReader.open(directory, true);
+      assertEquals(100, r.numDocs());
+      r.close();
+    }
+  }
+
+  /*
+    Run two indexer threads and two searcher threads against a
+    single index as a stress test.
+  */
+  public void runTest(Directory directory) throws Exception {
+
+    TimedThread[] threads = new TimedThread[4];
+
+    IndexWriterConfig conf = new IndexWriterConfig(
+        TEST_VERSION_CURRENT, new MockAnalyzer(random))
+        .setMaxBufferedDocs(7);
+    ((TieredMergePolicy) conf.getMergePolicy()).setMaxMergeAtOnce(3);
+    IndexWriter writer = new MockIndexWriter(directory, conf);
+    writer.setInfoStream(VERBOSE ? System.out : null);
+
+    // Establish a base index of 100 docs:
+    for(int i=0;i<100;i++) {
+      Document d = new Document();
+      d.add(newField("id", Integer.toString(i), Field.Store.YES, Field.Index.NOT_ANALYZED));
+      d.add(newField("contents", English.intToEnglish(i), Field.Store.NO, Field.Index.ANALYZED));
+      if ((i-1)%7 == 0) {
+        writer.commit();
+      }
+      writer.addDocument(d);
+    }
+    writer.commit();
+
+    IndexReader r = IndexReader.open(directory, true);
+    assertEquals(100, r.numDocs());
+    r.close();
+
+    IndexerThread indexerThread = new IndexerThread(writer, threads);
+    threads[0] = indexerThread;
+    indexerThread.start();
+    
+    IndexerThread indexerThread2 = new IndexerThread(writer, threads);
+    threads[1] = indexerThread2;
+    indexerThread2.start();
+      
+    SearcherThread searcherThread1 = new SearcherThread(directory, threads);
+    threads[2] = searcherThread1;
+    searcherThread1.start();
+
+    SearcherThread searcherThread2 = new SearcherThread(directory, threads);
+    threads[3] = searcherThread2;
+    searcherThread2.start();
+
+    indexerThread.join();
+    indexerThread2.join();
+    searcherThread1.join();
+    searcherThread2.join();
+
+    writer.close();
+
+    assertTrue("hit unexpected exception in indexer", !indexerThread.failed);
+    assertTrue("hit unexpected exception in indexer2", !indexerThread2.failed);
+    assertTrue("hit unexpected exception in search1", !searcherThread1.failed);
+    assertTrue("hit unexpected exception in search2", !searcherThread2.failed);
+    //System.out.println("    Writer: " + indexerThread.count + " iterations");
+    //System.out.println("Searcher 1: " + searcherThread1.count + " searchers created");
+    //System.out.println("Searcher 2: " + searcherThread2.count + " searchers created");
+  }
+
+  /*
+    Run above stress test against RAMDirectory and then
+    FSDirectory.
+  */
+  public void testAtomicUpdates() throws Exception {
+    MockIndexWriter.RANDOM = random;
+    Directory directory;
+
+    // First in a RAM directory:
+    directory = new MockDirectoryWrapper(random, new RAMDirectory());
+    runTest(directory);
+    directory.close();
+
+    // Second in an FSDirectory:
+    File dirPath = _TestUtil.getTempDir("lucene.test.atomic");
+    directory = newFSDirectory(dirPath);
+    runTest(directory);
+    directory.close();
+    _TestUtil.rmDir(dirPath);
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java b/lucene/backwards/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
new file mode 100644
index 0000000..8aba650
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
@@ -0,0 +1,800 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.File;
+import java.io.IOException;
+import java.io.ByteArrayInputStream;
+import java.io.DataInputStream;
+import java.util.Arrays;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Random;
+
+import org.apache.lucene.analysis.WhitespaceAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.Fieldable;
+import org.apache.lucene.document.FieldSelector;
+import org.apache.lucene.document.FieldSelectorResult;
+import org.apache.lucene.index.FieldInfo.IndexOptions;
+import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.document.NumericField;
+import org.apache.lucene.search.FieldCache;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.NumericRangeQuery;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.ReaderUtil;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util._TestUtil;
+import org.apache.lucene.util.Constants;
+
+/*
+  Verify we can read the pre-2.1 file format, do searches
+  against it, and add documents to it.
+*/
+
+public class TestBackwardsCompatibility extends LuceneTestCase {
+
+  // Uncomment these cases & run them on an older Lucene
+  // version, to generate an index to test backwards
+  // compatibility.  Then, cd to build/test/index.cfs and
+  // run "zip index.<VERSION>.cfs.zip *"; cd to
+  // build/test/index.nocfs and run "zip
+  // index.<VERSION>.nocfs.zip *".  Then move those 2 zip
+  // files to your trunk checkout and add them to the
+  // oldNames array.
+
+  /*
+  public void testCreateCFS() throws IOException {
+    createIndex("index.cfs", true, false);
+  }
+
+  public void testCreateNoCFS() throws IOException {
+    createIndex("index.nocfs", false, false);
+  }
+  */
+  
+  /*
+  // These are only needed for the special upgrade test to verify
+  // that also optimized indexes are correctly upgraded by IndexUpgrader.
+  // You don't need to build them for non-3.1 versions (the test is happy with just one
+  // "old" segment format; the exact version is unimportant):
+  
+  public void testCreateOptimizedCFS() throws IOException {
+    createIndex("index.optimized.cfs", true, true);
+  }
+
+  public void testCreateOptimizedNoCFS() throws IOException {
+    createIndex("index.optimized.nocfs", false, true);
+  }
+  */
+
+  final String[] oldNames = {"19.cfs",
+                             "19.nocfs",
+                             "20.cfs",
+                             "20.nocfs",
+                             "21.cfs",
+                             "21.nocfs",
+                             "22.cfs",
+                             "22.nocfs",
+                             "23.cfs",
+                             "23.nocfs",
+                             "24.cfs",
+                             "24.nocfs",
+                             "29.cfs",
+                             "29.nocfs",
+                             "30.cfs",
+                             "30.nocfs",
+                             "31.cfs",
+                             "31.nocfs",
+                             "32.cfs",
+                             "32.nocfs",
+  };
+  
+  final String[] oldOptimizedNames = {"31.optimized.cfs",
+                                      "31.optimized.nocfs",
+  };
+  
+  private void assertCompressedFields29(Directory dir, boolean shouldStillBeCompressed) throws IOException {
+    int count = 0;
+    // FieldSelectorResult.SIZE returns 2*number_of_chars for String fields:
+    final int TEXT_PLAIN_LENGTH = TEXT_TO_COMPRESS.length() * 2;
+    final int BINARY_PLAIN_LENGTH = BINARY_TO_COMPRESS.length;
+    
+    IndexReader reader = IndexReader.open(dir, true);
+    try {
+      // look into sub readers and check if raw merge is on/off
+      List<IndexReader> readers = new ArrayList<IndexReader>();
+      ReaderUtil.gatherSubReaders(readers, reader);
+      for (IndexReader ir : readers) {
+        final FieldsReader fr = ((SegmentReader) ir).getFieldsReader();
+        assertTrue("for a 2.9 index, FieldsReader.canReadRawDocs() must be false and other way round for a trunk index",
+          shouldStillBeCompressed != fr.canReadRawDocs());
+      }
+    
+      // test that decompression works correctly
+      for(int i=0; i<reader.maxDoc(); i++) {
+        if (!reader.isDeleted(i)) {
+          Document d = reader.document(i);
+          if (d.get("content3") != null) continue;
+          count++;
+          Fieldable compressed = d.getFieldable("compressed");
+          if (Integer.parseInt(d.get("id")) % 2 == 0) {
+            assertFalse(compressed.isBinary());
+            assertEquals("incorrectly decompressed string", TEXT_TO_COMPRESS, compressed.stringValue());
+          } else {
+            assertTrue(compressed.isBinary());
+            assertTrue("incorrectly decompressed binary", Arrays.equals(BINARY_TO_COMPRESS, compressed.getBinaryValue()));
+          }
+        }
+      }
+      
+      // check if field was decompressed after optimize
+      for(int i=0; i<reader.maxDoc(); i++) {
+        if (!reader.isDeleted(i)) {
+          Document d = reader.document(i, new FieldSelector() {
+            public FieldSelectorResult accept(String fieldName) {
+              return ("compressed".equals(fieldName)) ? FieldSelectorResult.SIZE : FieldSelectorResult.LOAD;
+            }
+          });
+          if (d.get("content3") != null) continue;
+          count++;
+          // read the size from the binary value using DataInputStream (this prevents us from doing the shift ops ourselves):
+          final DataInputStream ds = new DataInputStream(new ByteArrayInputStream(d.getFieldable("compressed").getBinaryValue()));
+          final int actualSize = ds.readInt();
+          ds.close();
+          final int compressedSize = Integer.parseInt(d.get("compressedSize"));
+          final boolean binary = Integer.parseInt(d.get("id")) % 2 > 0;
+          final int shouldSize = shouldStillBeCompressed ?
+            compressedSize :
+            (binary ? BINARY_PLAIN_LENGTH : TEXT_PLAIN_LENGTH);
+          assertEquals("size incorrect", shouldSize, actualSize);
+          if (!shouldStillBeCompressed) {
+            assertFalse("uncompressed field should have another size than recorded in index", compressedSize == actualSize);
+          }
+        }
+      }
+      assertEquals("correct number of tests", 34 * 2, count);
+    } finally {
+      reader.close();
+    }
+  }
+
+  public void testUpgrade29Compression() throws IOException {
+    int hasTested29 = 0;
+    
+    for(int i=0;i<oldNames.length;i++) {
+      File oldIndxeDir = _TestUtil.getTempDir(oldNames[i]);
+      _TestUtil.unzip(getDataFile("index." + oldNames[i] + ".zip"), oldIndxeDir);
+      Directory dir = newFSDirectory(oldIndxeDir);
+
+      if (oldNames[i].startsWith("29.")) {
+        assertCompressedFields29(dir, true);
+        hasTested29++;
+      }
+
+      new IndexUpgrader(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, null), VERBOSE ? System.out : null, false)
+        .upgrade();
+
+      if (oldNames[i].startsWith("29.")) {
+        assertCompressedFields29(dir, false);
+        hasTested29++;
+      }
+
+      dir.close();
+      _TestUtil.rmDir(oldIndxeDir);
+    }
+    
+    assertEquals("test for compressed field should have run 4 times", 4, hasTested29);
+  }
+
+  public void testAddOldIndexes() throws IOException {
+    for (String name : oldNames) {
+      File oldIndxeDir = _TestUtil.getTempDir(name);
+      _TestUtil.unzip(getDataFile("index." + name + ".zip"), oldIndxeDir);
+      Directory dir = newFSDirectory(oldIndxeDir);
+
+      Directory targetDir = newDirectory();
+      IndexWriter w = new IndexWriter(targetDir, newIndexWriterConfig(
+          TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)));
+      w.addIndexes(new Directory[] { dir });
+      w.close();
+      
+      dir.close();
+      targetDir.close();
+      _TestUtil.rmDir(oldIndxeDir);
+    }
+  }
+
+  public void testAddOldIndexesReader() throws IOException {
+    for (String name : oldNames) {
+      File oldIndxeDir = _TestUtil.getTempDir(name);
+      _TestUtil.unzip(getDataFile("index." + name + ".zip"), oldIndxeDir);
+      Directory dir = newFSDirectory(oldIndxeDir);
+      IndexReader reader = IndexReader.open(dir);
+      
+      Directory targetDir = newDirectory();
+      IndexWriter w = new IndexWriter(targetDir, newIndexWriterConfig(
+          TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)));
+      w.addIndexes(new IndexReader[] { reader });
+      w.close();
+      reader.close();
+            
+      dir.close();
+      targetDir.close();
+      _TestUtil.rmDir(oldIndxeDir);
+    }
+  }
+
+  public void testSearchOldIndex() throws IOException {
+    for(int i=0;i<oldNames.length;i++) {
+      File oldIndxeDir = _TestUtil.getTempDir(oldNames[i]);
+      _TestUtil.unzip(getDataFile("index." + oldNames[i] + ".zip"), oldIndxeDir);
+      searchIndex(oldIndxeDir, oldNames[i]);
+      _TestUtil.rmDir(oldIndxeDir);
+    }
+  }
+
+  public void testIndexOldIndexNoAdds() throws IOException {
+    for(int i=0;i<oldNames.length;i++) {
+      File oldIndxeDir = _TestUtil.getTempDir(oldNames[i]);
+      _TestUtil.unzip(getDataFile("index." + oldNames[i] + ".zip"), oldIndxeDir);
+      changeIndexNoAdds(random, oldIndxeDir);
+      _TestUtil.rmDir(oldIndxeDir);
+    }
+  }
+
+  public void testIndexOldIndex() throws IOException {
+    for(int i=0;i<oldNames.length;i++) {
+      if (VERBOSE) {
+        System.out.println("TEST: oldName=" + oldNames[i]);
+      }
+      File oldIndxeDir = _TestUtil.getTempDir(oldNames[i]);
+      _TestUtil.unzip(getDataFile("index." + oldNames[i] + ".zip"), oldIndxeDir);
+      changeIndexWithAdds(random, oldIndxeDir, oldNames[i]);
+      _TestUtil.rmDir(oldIndxeDir);
+    }
+  }
+
+  private void testHits(ScoreDoc[] hits, int expectedCount, IndexReader reader) throws IOException {
+    final int hitCount = hits.length;
+    assertEquals("wrong number of hits", expectedCount, hitCount);
+    for(int i=0;i<hitCount;i++) {
+      reader.document(hits[i].doc);
+      reader.getTermFreqVectors(hits[i].doc);
+    }
+  }
+
+  public void searchIndex(File indexDir, String oldName) throws IOException {
+    //QueryParser parser = new QueryParser("contents", new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
+    //Query query = parser.parse("handle:1");
+
+    Directory dir = newFSDirectory(indexDir);
+    IndexSearcher searcher = new IndexSearcher(dir, true);
+    IndexReader reader = searcher.getIndexReader();
+
+    _TestUtil.checkIndex(dir);
+
+    for(int i=0;i<35;i++) {
+      if (!reader.isDeleted(i)) {
+        Document d = reader.document(i);
+        List<Fieldable> fields = d.getFields();
+        if (!oldName.startsWith("19.") &&
+            !oldName.startsWith("20.") &&
+            !oldName.startsWith("21.") &&
+            !oldName.startsWith("22.")) {
+
+          if (d.getField("content3") == null) {
+            final int numFields = oldName.startsWith("29.") ? 7 : 5;
+            assertEquals(numFields, fields.size());
+            Field f =  d.getField("id");
+            assertEquals(""+i, f.stringValue());
+
+            f = d.getField("utf8");
+            assertEquals("Lu\uD834\uDD1Ece\uD834\uDD60ne \u0000 \u2620 ab\ud917\udc17cd", f.stringValue());
+
+            f =  d.getField("autf8");
+            assertEquals("Lu\uD834\uDD1Ece\uD834\uDD60ne \u0000 \u2620 ab\ud917\udc17cd", f.stringValue());
+        
+            f = d.getField("content2");
+            assertEquals("here is more content with aaa aaa aaa", f.stringValue());
+
+            f = d.getField("fie\u2C77ld");
+            assertEquals("field with non-ascii name", f.stringValue());
+          }
+
+          TermFreqVector tfv = reader.getTermFreqVector(i, "utf8");
+          assertNotNull("docID=" + i + " index=" + indexDir.getName(), tfv);
+          assertTrue(tfv instanceof TermPositionVector);
+        }       
+      } else
+        // Only ID 7 is deleted
+        assertEquals(7, i);
+    }
+    
+    ScoreDoc[] hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs;
+
+    // First document should be #21 since its norm was
+    // increased:
+    Document d = searcher.doc(hits[0].doc);
+    assertEquals("didn't get the right document first", "21", d.get("id"));
+
+    testHits(hits, 34, searcher.getIndexReader());
+
+    if (!oldName.startsWith("19.") &&
+        !oldName.startsWith("20.") &&
+        !oldName.startsWith("21.") &&
+        !oldName.startsWith("22.")) {
+      // Test on indices >= 2.3
+      hits = searcher.search(new TermQuery(new Term("utf8", "\u0000")), null, 1000).scoreDocs;
+      assertEquals(34, hits.length);
+      hits = searcher.search(new TermQuery(new Term("utf8", "Lu\uD834\uDD1Ece\uD834\uDD60ne")), null, 1000).scoreDocs;
+      assertEquals(34, hits.length);
+      hits = searcher.search(new TermQuery(new Term("utf8", "ab\ud917\udc17cd")), null, 1000).scoreDocs;
+      assertEquals(34, hits.length);
+    }
+
+    searcher.close();
+    dir.close();
+  }
+
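+  // Compares the two-digit version prefix of an old index name (e.g. "24" from "24.nocfs") against v: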
+  private int compare(String name, String v) {
+    int v0 = Integer.parseInt(name.substring(0, 2));
+    int v1 = Integer.parseInt(v);
+    return v0 - v1;
+  }
+
+  /* Open pre-lockless index, add docs, do a delete &
+   * setNorm, and search */
+  public void changeIndexWithAdds(Random random, File oldIndexDir, String origOldName) throws IOException {
+    Directory dir = newFSDirectory(oldIndexDir);
+    // open writer
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).setOpenMode(OpenMode.APPEND));
+    writer.setInfoStream(VERBOSE ? System.out : null);
+    // add 10 docs
+    for(int i=0;i<10;i++) {
+      addDoc(writer, 35+i);
+    }
+
+    // make sure writer sees right total -- writer seems not to know about deletes in .del?
+    final int expected;
+    if (compare(origOldName, "24") < 0) {
+      expected = 44;
+    } else {
+      expected = 45;
+    }
+    assertEquals("wrong doc count", expected, writer.numDocs());
+    writer.close();
+
+    // make sure searching sees right # hits
+    IndexSearcher searcher = new IndexSearcher(dir, true);
+    ScoreDoc[] hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs;
+    Document d = searcher.doc(hits[0].doc);
+    assertEquals("wrong first document", "21", d.get("id"));
+    testHits(hits, 44, searcher.getIndexReader());
+    searcher.close();
+
+    // make sure we can do delete & setNorm against this
+    // pre-lockless segment:
+    IndexReader reader = IndexReader.open(dir, false);
+    searcher = newSearcher(reader);
+    Term searchTerm = new Term("id", "6");
+    int delCount = reader.deleteDocuments(searchTerm);
+    assertEquals("wrong delete count", 1, delCount);
+    reader.setNorm(searcher.search(new TermQuery(new Term("id", "22")), 10).scoreDocs[0].doc, "content", (float) 2.0);
+    reader.close();
+    searcher.close();
+
+    // make sure they "took":
+    searcher = new IndexSearcher(dir, true);
+    hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs;
+    assertEquals("wrong number of hits", 43, hits.length);
+    d = searcher.doc(hits[0].doc);
+    assertEquals("wrong first document", "22", d.get("id"));
+    testHits(hits, 43, searcher.getIndexReader());
+    searcher.close();
+
+    // optimize
+    writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).setOpenMode(OpenMode.APPEND));
+    writer.optimize();
+    writer.close();
+
+    searcher = new IndexSearcher(dir, true);
+    hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs;
+    assertEquals("wrong number of hits", 43, hits.length);
+    d = searcher.doc(hits[0].doc);
+    testHits(hits, 43, searcher.getIndexReader());
+    assertEquals("wrong first document", "22", d.get("id"));
+    searcher.close();
+
+    dir.close();
+  }
+
+  /* Open pre-lockless index, add docs, do a delete &
+   * setNorm, and search */
+  public void changeIndexNoAdds(Random random, File oldIndexDir) throws IOException {
+
+    Directory dir = newFSDirectory(oldIndexDir);
+
+    // make sure searching sees right # hits
+    IndexSearcher searcher = new IndexSearcher(dir, true);
+    ScoreDoc[] hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs;
+    assertEquals("wrong number of hits", 34, hits.length);
+    Document d = searcher.doc(hits[0].doc);
+    assertEquals("wrong first document", "21", d.get("id"));
+    searcher.close();
+
+    // make sure we can do a delete & setNorm against this
+    // pre-lockless segment:
+    IndexReader reader = IndexReader.open(dir, false);
+    Term searchTerm = new Term("id", "6");
+    int delCount = reader.deleteDocuments(searchTerm);
+    assertEquals("wrong delete count", 1, delCount);
+    reader.setNorm(22, "content", (float) 2.0);
+    reader.close();
+
+    // make sure they "took":
+    searcher = new IndexSearcher(dir, true);
+    hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs;
+    assertEquals("wrong number of hits", 33, hits.length);
+    d = searcher.doc(hits[0].doc);
+    assertEquals("wrong first document", "22", d.get("id"));
+    testHits(hits, 33, searcher.getIndexReader());
+    searcher.close();
+
+    // optimize
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).setOpenMode(OpenMode.APPEND));
+    writer.optimize();
+    writer.close();
+
+    searcher = new IndexSearcher(dir, true);
+    hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs;
+    assertEquals("wrong number of hits", 33, hits.length);
+    d = searcher.doc(hits[0].doc);
+    assertEquals("wrong first document", "22", d.get("id"));
+    testHits(hits, 33, searcher.getIndexReader());
+    searcher.close();
+
+    dir.close();
+  }
+
+  public File createIndex(String dirName, boolean doCFS, boolean optimized) throws IOException {
+    // we use a real directory name that is not cleaned up, because this method is only used to create backwards indexes:
+    File indexDir = new File(LuceneTestCase.TEMP_DIR, dirName);
+    _TestUtil.rmDir(indexDir);
+    Directory dir = newFSDirectory(indexDir);
+    LogByteSizeMergePolicy mp = new LogByteSizeMergePolicy();
+    mp.setUseCompoundFile(doCFS);
+    mp.setNoCFSRatio(1.0);
+    IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT))
+      .setMaxBufferedDocs(10).setMergePolicy(mp);
+    IndexWriter writer = new IndexWriter(dir, conf);
+    
+    for(int i=0;i<35;i++) {
+      addDoc(writer, i);
+    }
+    assertEquals("wrong doc count", 35, writer.maxDoc());
+    if (optimized) {
+      writer.optimize();
+    }
+    writer.close();
+
+    if (!optimized) {
+      // open fresh writer so we get no prx file in the added segment
+      mp = new LogByteSizeMergePolicy();
+      mp.setUseCompoundFile(doCFS);
+      mp.setNoCFSRatio(1.0);
+      conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT))
+        .setMaxBufferedDocs(10).setMergePolicy(mp);
+      writer = new IndexWriter(dir, conf);
+      addNoProxDoc(writer);
+      writer.close();
+
+      // Delete one doc so we get a .del file:
+      IndexReader reader = IndexReader.open(dir, false);
+      Term searchTerm = new Term("id", "7");
+      int delCount = reader.deleteDocuments(searchTerm);
+      assertEquals("didn't delete the right number of documents", 1, delCount);
+
+      // Set one norm so we get a .s0 file:
+      reader.setNorm(21, "content", (float) 1.5);
+      reader.close();
+    }
+    
+    dir.close();
+    
+    return indexDir;
+  }
+
+  /* Verifies that the expected file names were produced */
+
+  public void testExactFileNames() throws IOException {
+
+    String outputDirName = "lucene.backwardscompat0.index";
+    File outputDir = _TestUtil.getTempDir(outputDirName);
+    _TestUtil.rmDir(outputDir);
+
+    try {
+      Directory dir = newFSDirectory(outputDir);
+
+      LogMergePolicy mergePolicy = newLogMergePolicy(true, 10);
+      mergePolicy.setNoCFSRatio(1); // This test expects all of its segments to be in CFS
+      IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).setMaxBufferedDocs(-1).setRAMBufferSizeMB(16.0)
+        .setMergePolicy(mergePolicy);
+      IndexWriter writer = new IndexWriter(dir, conf);
+      for(int i=0;i<35;i++) {
+        addDoc(writer, i);
+      }
+      assertEquals("wrong doc count", 35, writer.maxDoc());
+      writer.close();
+
+      // Delete one doc so we get a .del file:
+      IndexReader reader = IndexReader.open(dir, false);
+      Term searchTerm = new Term("id", "7");
+      int delCount = reader.deleteDocuments(searchTerm);
+      assertEquals("didn't delete the right number of documents", 1, delCount);
+
+      // Set one norm so we get a .s0 file:
+      reader.setNorm(21, "content", (float) 1.5);
+      reader.close();
+
+      // The numbering of fields can vary depending on which
+      // JRE is in use.  On some JREs we see content bound to
+      // field 0; on others, field 1.  So, here we have to
+      // figure out which field number corresponds to
+      // "content", and then set our expected file names below
+      // accordingly:
+      CompoundFileReader cfsReader = new CompoundFileReader(dir, "_0.cfs");
+      FieldInfos fieldInfos = new FieldInfos(cfsReader, "_0.fnm");
+      int contentFieldIndex = -1;
+      for(int i=0;i<fieldInfos.size();i++) {
+        FieldInfo fi = fieldInfos.fieldInfo(i);
+        if (fi.name.equals("content")) {
+          contentFieldIndex = i;
+          break;
+        }
+      }
+      cfsReader.close();
+      assertTrue("could not locate the 'content' field number in the _2.cfs segment", contentFieldIndex != -1);
+
+      // Now verify file names:
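+      // _0_1.del is the first deletions generation for segment _0; _0_1.s<N> is the separate norms file for field number N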
+      String[] expected = new String[] {"_0.cfs",
+                               "_0_1.del",
+                               "_0_1.s" + contentFieldIndex,
+                               "segments_2",
+                               "segments.gen"};
+
+      String[] actual = dir.listAll();
+      Arrays.sort(expected);
+      Arrays.sort(actual);
+      if (!Arrays.equals(expected, actual)) {
+        fail("incorrect filenames in index: expected:\n    " + asString(expected) + "\n  actual:\n    " + asString(actual));
+      }
+      dir.close();
+    } finally {
+      _TestUtil.rmDir(outputDir);
+    }
+  }
+
+  private String asString(String[] l) {
+    String s = "";
+    for(int i=0;i<l.length;i++) {
+      if (i > 0) {
+        s += "\n    ";
+      }
+      s += l[i];
+    }
+    return s;
+  }
+
+  private void addDoc(IndexWriter writer, int id) throws IOException {
+    Document doc = new Document();
+    doc.add(new Field("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
+    doc.add(new Field("id", Integer.toString(id), Field.Store.YES, Field.Index.NOT_ANALYZED));
+    doc.add(new Field("autf8", "Lu\uD834\uDD1Ece\uD834\uDD60ne \u0000 \u2620 ab\ud917\udc17cd", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+    doc.add(new Field("utf8", "Lu\uD834\uDD1Ece\uD834\uDD60ne \u0000 \u2620 ab\ud917\udc17cd", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+    doc.add(new Field("content2", "here is more content with aaa aaa aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+    doc.add(new Field("fie\u2C77ld", "field with non-ascii name", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+    /* This was used in 2.9 to generate an index with compressed field:
+    if (id % 2 == 0) {
+      doc.add(new Field("compressed", TEXT_TO_COMPRESS, Field.Store.COMPRESS, Field.Index.NOT_ANALYZED));
+      doc.add(new Field("compressedSize", Integer.toString(TEXT_COMPRESSED_LENGTH), Field.Store.YES, Field.Index.NOT_ANALYZED));
+    } else {
+      doc.add(new Field("compressed", BINARY_TO_COMPRESS, Field.Store.COMPRESS));    
+      doc.add(new Field("compressedSize", Integer.toString(BINARY_COMPRESSED_LENGTH), Field.Store.YES, Field.Index.NOT_ANALYZED));
+    }
+    */
+    // add numeric fields, to test if later versions preserve encoding
+    doc.add(new NumericField("trieInt", 4).setIntValue(id));
+    doc.add(new NumericField("trieLong", 4).setLongValue(id));
+    writer.addDocument(doc);
+  }
+
+  private void addNoProxDoc(IndexWriter writer) throws IOException {
+    Document doc = new Document();
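+    // DOCS_ONLY omits term frequencies and positions, so this writer produces a segment without a .prx file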
+    Field f = new Field("content3", "aaa", Field.Store.YES, Field.Index.ANALYZED);
+    f.setIndexOptions(IndexOptions.DOCS_ONLY);
+    doc.add(f);
+    f = new Field("content4", "aaa", Field.Store.YES, Field.Index.NO);
+    f.setIndexOptions(IndexOptions.DOCS_ONLY);
+    doc.add(f);
+    writer.addDocument(doc);
+  }
+
+  static final String TEXT_TO_COMPRESS = "this is a compressed field and should appear in 3.0 as an uncompressed field after merge";
+  // FieldSelectorResult.SIZE returns compressed size for compressed fields, which are internally handled as binary;
+  // do it in the same way like FieldsWriter, do not use CompressionTools.compressString() for compressed fields:
+  /* This was used in 2.9 to generate an index with compressed field:
+  static final int TEXT_COMPRESSED_LENGTH;
+  static {
+    try {
+      TEXT_COMPRESSED_LENGTH = CompressionTools.compress(TEXT_TO_COMPRESS.getBytes("UTF-8")).length;
+    } catch (Exception e) {
+      throw new RuntimeException();
+    }
+  }
+  */
+  static final byte[] BINARY_TO_COMPRESS = new byte[]{1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20};
+  /* This was used in 2.9 to generate an index with compressed field:
+  static final int BINARY_COMPRESSED_LENGTH = CompressionTools.compress(BINARY_TO_COMPRESS).length;
+  */
+  
+  public void testNumericFields() throws Exception {
+    for(int i=0;i<oldNames.length;i++) {
+      // only test indexes >= 3.0
+      if (oldNames[i].compareTo("30.") < 0) continue;
+      
+      File oldIndexDir = _TestUtil.getTempDir(oldNames[i]);
+      _TestUtil.unzip(getDataFile("index." + oldNames[i] + ".zip"), oldIndexDir);
+      Directory dir = newFSDirectory(oldIndexDir);
+      IndexSearcher searcher = new IndexSearcher(dir, true);
+      
+      for (int id=10; id<15; id++) {
+        ScoreDoc[] hits = searcher.search(NumericRangeQuery.newIntRange("trieInt", 4, Integer.valueOf(id), Integer.valueOf(id), true, true), 100).scoreDocs;
+        assertEquals("wrong number of hits", 1, hits.length);
+        Document d = searcher.doc(hits[0].doc);
+        assertEquals(String.valueOf(id), d.get("id"));
+        
+        hits = searcher.search(NumericRangeQuery.newLongRange("trieLong", 4, Long.valueOf(id), Long.valueOf(id), true, true), 100).scoreDocs;
+        assertEquals("wrong number of hits", 1, hits.length);
+        d = searcher.doc(hits[0].doc);
+        assertEquals(String.valueOf(id), d.get("id"));
+      }
+      
+      // check that also lower-precision fields are ok
+      ScoreDoc[] hits = searcher.search(NumericRangeQuery.newIntRange("trieInt", 4, Integer.MIN_VALUE, Integer.MAX_VALUE, false, false), 100).scoreDocs;
+      assertEquals("wrong number of hits", 34, hits.length);
+      
+      hits = searcher.search(NumericRangeQuery.newLongRange("trieLong", 4, Long.MIN_VALUE, Long.MAX_VALUE, false, false), 100).scoreDocs;
+      assertEquals("wrong number of hits", 34, hits.length);
+      
+      // check decoding into field cache
+      int[] fci = FieldCache.DEFAULT.getInts(searcher.getIndexReader(), "trieInt");
+      for (int val : fci) {
+        assertTrue("value in id bounds", val >= 0 && val < 35);
+      }
+      
+      long[] fcl = FieldCache.DEFAULT.getLongs(searcher.getIndexReader(), "trieLong");
+      for (long val : fcl) {
+        assertTrue("value in id bounds", val >= 0L && val < 35L);
+      }
+      
+      searcher.close();
+      dir.close();
+      _TestUtil.rmDir(oldIndexDir);
+    }
+  }
+  
+  private int checkAllSegmentsUpgraded(Directory dir) throws IOException {
+    final SegmentInfos infos = new SegmentInfos();
+    infos.read(dir);
+    if (VERBOSE) {
+      System.out.println("checkAllSegmentsUpgraded: " + infos);
+    }
+    for (SegmentInfo si : infos) {
+      assertEquals(Constants.LUCENE_MAIN_VERSION, si.getVersion());
+    }
+    return infos.size();
+  }
+  
+  private int getNumberOfSegments(Directory dir) throws IOException {
+    final SegmentInfos infos = new SegmentInfos();
+    infos.read(dir);
+    return infos.size();
+  }
+
+  public void testUpgradeOldIndex() throws Exception {
+    List<String> names = new ArrayList<String>(oldNames.length + oldOptimizedNames.length);
+    names.addAll(Arrays.asList(oldNames));
+    names.addAll(Arrays.asList(oldOptimizedNames));
+    for(String name : names) {
+      if (VERBOSE) {
+        System.out.println("testUpgradeOldIndex: index=" +name);
+      }
+      File oldIndexDir = _TestUtil.getTempDir(name);
+      _TestUtil.unzip(getDataFile("index." + name + ".zip"), oldIndexDir);
+      Directory dir = newFSDirectory(oldIndexDir);
+
+      new IndexUpgrader(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, null), VERBOSE ? System.out : null, false)
+        .upgrade();
+
+      checkAllSegmentsUpgraded(dir);
+      
+      dir.close();
+      _TestUtil.rmDir(oldIndexDir);
+    }
+  }
+
+  public void testUpgradeOldOptimizedIndexWithAdditions() throws Exception {
+    for (String name : oldOptimizedNames) {
+      if (VERBOSE) {
+        System.out.println("testUpgradeOldOptimizedIndexWithAdditions: index=" +name);
+      }
+      File oldIndexDir = _TestUtil.getTempDir(name);
+      _TestUtil.unzip(getDataFile("index." + name + ".zip"), oldIndexDir);
+      Directory dir = newFSDirectory(oldIndexDir);
+
+      assertEquals("Original index must be optimized", 1, getNumberOfSegments(dir));
+
+      // create a bunch of dummy segments
+      int id = 40;
+      RAMDirectory ramDir = new RAMDirectory();
+      for (int i = 0; i < 3; i++) {
+        // only use Log- or TieredMergePolicy, so that adding documents is predictable and does not trigger an unexpected merge:
+        MergePolicy mp = random.nextBoolean() ? newLogMergePolicy() : newTieredMergePolicy();
+        IndexWriterConfig iwc = new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT))
+          .setMergePolicy(mp);
+        IndexWriter w = new IndexWriter(ramDir, iwc);
+        // add a few more docs:
+        for(int j = 0; j < RANDOM_MULTIPLIER * random.nextInt(30); j++) {
+          addDoc(w, id++);
+        }
+        w.close(false);
+      }
+      
+      // add dummy segments (which are all in current version) to optimized index
+      MergePolicy mp = random.nextBoolean() ? newLogMergePolicy() : newTieredMergePolicy();
+      IndexWriterConfig iwc = new IndexWriterConfig(TEST_VERSION_CURRENT, null)
+        .setMergePolicy(mp);
+      IndexWriter w = new IndexWriter(dir, iwc);
+      w.setInfoStream(VERBOSE ? System.out : null);
+      w.addIndexes(ramDir);
+      w.close(false);
+      
+      // determine count of segments in modified index
+      final int origSegCount = getNumberOfSegments(dir);
+      
+      new IndexUpgrader(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, null), VERBOSE ? System.out : null, false)
+        .upgrade();
+
+      final int segCount = checkAllSegmentsUpgraded(dir);
+      assertEquals("Index must still contain the same number of segments, as only one segment was upgraded and nothing else merged",
+        origSegCount, segCount);
+      
+      dir.close();
+      _TestUtil.rmDir(oldIndexDir);
+    }
+  }
+
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestByteSlices.java b/lucene/backwards/src/test/org/apache/lucene/index/TestByteSlices.java
new file mode 100644
index 0000000..07c6e78
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestByteSlices.java
@@ -0,0 +1,118 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.lucene.util.LuceneTestCase;
+
+public class TestByteSlices extends LuceneTestCase {
+
+  private static class ByteBlockAllocator extends ByteBlockPool.Allocator {
+    ArrayList<byte[]> freeByteBlocks = new ArrayList<byte[]>();
+    
+    /* Allocate another byte[] from the shared pool */
+    @Override
+    synchronized byte[] getByteBlock() {
+      final int size = freeByteBlocks.size();
+      final byte[] b;
+      if (0 == size)
+        b = new byte[DocumentsWriter.BYTE_BLOCK_SIZE];
+      else
+        b =  freeByteBlocks.remove(size-1);
+      return b;
+    }
+
+    /* Return a byte[] to the pool */
+    @Override
+    synchronized void recycleByteBlocks(byte[][] blocks, int start, int end) {
+      for(int i=start;i<end;i++)
+        freeByteBlocks.add(blocks[i]);
+    }
+
+    @Override
+    synchronized void recycleByteBlocks(List<byte[]> blocks) {
+      final int size = blocks.size();
+      for(int i=0;i<size;i++)
+        freeByteBlocks.add(blocks.get(i));
+    }
+  }
+
+  public void testBasic() throws Throwable {
+    ByteBlockPool pool = new ByteBlockPool(new ByteBlockAllocator());
+
+    final int NUM_STREAM = atLeast(100);
+
+    ByteSliceWriter writer = new ByteSliceWriter(pool);
+
+    int[] starts = new int[NUM_STREAM];
+    int[] uptos = new int[NUM_STREAM];
+    int[] counters = new int[NUM_STREAM];
+
+    ByteSliceReader reader = new ByteSliceReader();
+
+    for(int ti=0;ti<100;ti++) {
+
+      for(int stream=0;stream<NUM_STREAM;stream++) {
+        starts[stream] = -1;
+        counters[stream] = 0;
+      }
+      
+      int num = atLeast(10000);
+      for (int iter = 0; iter < num; iter++) {
+        int stream = random.nextInt(NUM_STREAM);
+        if (VERBOSE)
+          System.out.println("write stream=" + stream);
+
+        if (starts[stream] == -1) {
+          final int spot = pool.newSlice(ByteBlockPool.FIRST_LEVEL_SIZE);
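+          // newSlice returns an offset within the current buffer; adding pool.byteOffset makes it an absolute pool address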
+          starts[stream] = uptos[stream] = spot + pool.byteOffset;
+          if (VERBOSE)
+            System.out.println("  init to " + starts[stream]);
+        }
+
+        writer.init(uptos[stream]);
+        int numValue = random.nextInt(20);
+        for(int j=0;j<numValue;j++) {
+          if (VERBOSE)
+            System.out.println("    write " + (counters[stream]+j));
+          // write some large (incl. negative) ints:
+          writer.writeVInt(random.nextInt());
+          writer.writeVInt(counters[stream]+j);
+        }
+        counters[stream] += numValue;
+        uptos[stream] = writer.getAddress();
+        if (VERBOSE)
+          System.out.println("    addr now " + uptos[stream]);
+      }
+    
+      for(int stream=0;stream<NUM_STREAM;stream++) {
+        if (VERBOSE)
+          System.out.println("  stream=" + stream + " count=" + counters[stream]);
+
+        if (starts[stream] != -1 && starts[stream] != uptos[stream]) {
+          reader.init(pool, starts[stream], uptos[stream]);
+          for(int j=0;j<counters[stream];j++) {
+            reader.readVInt();
+            assertEquals(j, reader.readVInt()); 
+          }
+        }
+      }
+
+      pool.reset();
+    }
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestCheckIndex.java b/lucene/backwards/src/test/org/apache/lucene/index/TestCheckIndex.java
new file mode 100644
index 0000000..9c6355a
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestCheckIndex.java
@@ -0,0 +1,104 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.io.ByteArrayOutputStream;
+import java.io.PrintStream;
+import java.util.List;
+import java.util.ArrayList;
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.WhitespaceAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.util.Constants;
+
+public class TestCheckIndex extends LuceneTestCase {
+
+  public void testDeletedDocs() throws IOException {
+    Directory dir = newDirectory();
+    IndexWriter writer  = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(2));
+    Document doc = new Document();
+    doc.add(newField("field", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+    for(int i=0;i<19;i++) {
+      writer.addDocument(doc);
+    }
+    writer.optimize();
+    writer.close();
+    IndexReader reader = IndexReader.open(dir, false);
+    reader.deleteDocument(5);
+    reader.close();
+
+    ByteArrayOutputStream bos = new ByteArrayOutputStream(1024);
+    CheckIndex checker = new CheckIndex(dir);
+    checker.setInfoStream(new PrintStream(bos));
+    if (VERBOSE) checker.setInfoStream(System.out);
+    CheckIndex.Status indexStatus = checker.checkIndex();
+    if (!indexStatus.clean) {
+      System.out.println("CheckIndex failed");
+      System.out.println(bos.toString());
+      fail();
+    }
+    
+    final CheckIndex.Status.SegmentInfoStatus seg = indexStatus.segmentInfos.get(0);
+    assertTrue(seg.openReaderPassed);
+
+    assertNotNull(seg.diagnostics);
+    
+    assertNotNull(seg.fieldNormStatus);
+    assertNull(seg.fieldNormStatus.error);
+    assertEquals(1, seg.fieldNormStatus.totFields);
+
+    assertNotNull(seg.termIndexStatus);
+    assertNull(seg.termIndexStatus.error);
+    assertEquals(1, seg.termIndexStatus.termCount);
+    assertEquals(19, seg.termIndexStatus.totFreq);
+    assertEquals(18, seg.termIndexStatus.totPos);
+
+    assertNotNull(seg.storedFieldStatus);
+    assertNull(seg.storedFieldStatus.error);
+    assertEquals(18, seg.storedFieldStatus.docCount);
+    assertEquals(18, seg.storedFieldStatus.totFields);
+
+    assertNotNull(seg.termVectorStatus);
+    assertNull(seg.termVectorStatus.error);
+    assertEquals(18, seg.termVectorStatus.docCount);
+    assertEquals(18, seg.termVectorStatus.totVectors);
+
+    assertTrue(seg.diagnostics.size() > 0);
+    final List<String> onlySegments = new ArrayList<String>();
+    onlySegments.add("_0");
+    
+    assertTrue(checker.checkIndex(onlySegments).clean);
+    dir.close();
+  }
+
+  public void testLuceneConstantVersion() throws IOException {
+    // common-build.xml sets lucene.version
+    final String version = System.getProperty("lucene.version");
+    assertNotNull( "null version", version);
+    assertTrue("Invalid version: "+version,
+               version.equals(Constants.LUCENE_MAIN_VERSION+"-SNAPSHOT") ||
+               version.equals(Constants.LUCENE_MAIN_VERSION));
+    assertTrue(version + " should start with: "+Constants.LUCENE_VERSION,
+               Constants.LUCENE_VERSION.startsWith(version));
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestCompoundFile.java b/lucene/backwards/src/test/org/apache/lucene/index/TestCompoundFile.java
new file mode 100644
index 0000000..16892a8
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestCompoundFile.java
@@ -0,0 +1,688 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.io.File;
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.store.IndexOutput;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.store.MockDirectoryWrapper;
+import org.apache.lucene.store.MockDirectoryWrapper.Failure;
+import org.apache.lucene.store.SimpleFSDirectory;
+import org.apache.lucene.store._TestHelper;
+import org.apache.lucene.util._TestUtil;
+
+
+public class TestCompoundFile extends LuceneTestCase
+{
+    private Directory dir;
+
+    @Override
+    public void setUp() throws Exception {
+       super.setUp();
+       File file = _TestUtil.getTempDir("testIndex");
+       // use a simple FSDir here, to be sure to have SimpleFSInputs
+       dir = new SimpleFSDirectory(file,null);
+    }
+
+    @Override
+    public void tearDown() throws Exception {
+       dir.close();
+       super.tearDown();
+    }
+
+    /** Creates a file of the specified size with random data. */
+    private void createRandomFile(Directory dir, String name, int size)
+    throws IOException
+    {
+        IndexOutput os = dir.createOutput(name);
+        for (int i=0; i<size; i++) {
+            byte b = (byte) (Math.random() * 256);
+            os.writeByte(b);
+        }
+        os.close();
+    }
+
+    /** Creates a file of the specified size with sequential data. The first
+     *  byte is written as the start byte provided. All subsequent bytes are
+     *  computed as start + offset where offset is the number of the byte.
+     */
+    private void createSequenceFile(Directory dir,
+                                    String name,
+                                    byte start,
+                                    int size)
+    throws IOException
+    {
+        IndexOutput os = dir.createOutput(name);
+        for (int i=0; i < size; i++) {
+            os.writeByte(start);
+            start ++;
+        }
+        os.close();
+    }
+
+
+    private void assertSameStreams(String msg,
+                                   IndexInput expected,
+                                   IndexInput test)
+    throws IOException
+    {
+        assertNotNull(msg + " null expected", expected);
+        assertNotNull(msg + " null test", test);
+        assertEquals(msg + " length", expected.length(), test.length());
+        assertEquals(msg + " position", expected.getFilePointer(),
+                                        test.getFilePointer());
+
+        byte expectedBuffer[] = new byte[512];
+        byte testBuffer[] = new byte[expectedBuffer.length];
+
+        long remainder = expected.length() - expected.getFilePointer();
+        while(remainder > 0) {
+            int readLen = (int) Math.min(remainder, expectedBuffer.length);
+            expected.readBytes(expectedBuffer, 0, readLen);
+            test.readBytes(testBuffer, 0, readLen);
+            assertEqualArrays(msg + ", remainder " + remainder, expectedBuffer,
+                testBuffer, 0, readLen);
+            remainder -= readLen;
+        }
+    }
+
+
+    private void assertSameStreams(String msg,
+                                   IndexInput expected,
+                                   IndexInput actual,
+                                   long seekTo)
+    throws IOException
+    {
+        if(seekTo >= 0 && seekTo < expected.length())
+        {
+            expected.seek(seekTo);
+            actual.seek(seekTo);
+            assertSameStreams(msg + ", seek(mid)", expected, actual);
+        }
+    }
+
+
+
+    private void assertSameSeekBehavior(String msg,
+                                        IndexInput expected,
+                                        IndexInput actual)
+    throws IOException
+    {
+        // seek to 0
+        long point = 0;
+        assertSameStreams(msg + ", seek(0)", expected, actual, point);
+
+        // seek to middle
+        point = expected.length() / 2L;
+        assertSameStreams(msg + ", seek(mid)", expected, actual, point);
+
+        // seek to end - 2
+        point = expected.length() - 2;
+        assertSameStreams(msg + ", seek(end-2)", expected, actual, point);
+
+        // seek to end - 1
+        point = expected.length() - 1;
+        assertSameStreams(msg + ", seek(end-1)", expected, actual, point);
+
+        // seek to the end
+        point = expected.length();
+        assertSameStreams(msg + ", seek(end)", expected, actual, point);
+
+        // seek past end
+        point = expected.length() + 1;
+        assertSameStreams(msg + ", seek(end+1)", expected, actual, point);
+    }
+
+
+    private void assertEqualArrays(String msg,
+                                   byte[] expected,
+                                   byte[] test,
+                                   int start,
+                                   int len)
+    {
+        assertNotNull(msg + " null expected", expected);
+        assertNotNull(msg + " null test", test);
+
+        for (int i=start; i<len; i++) {
+            assertEquals(msg + " " + i, expected[i], test[i]);
+        }
+    }
+
+
+    // ===========================================================
+    //  Tests of the basic CompoundFile functionality
+    // ===========================================================
+
+
+    /** This test creates compound file based on a single file.
+     *  Files of different sizes are tested: 0, 1, 10, 100 bytes.
+     */
+    public void testSingleFile() throws IOException {
+        int data[] = new int[] { 0, 1, 10, 100 };
+        for (int i=0; i<data.length; i++) {
+            String name = "t" + data[i];
+            createSequenceFile(dir, name, (byte) 0, data[i]);
+            CompoundFileWriter csw = new CompoundFileWriter(dir, name + ".cfs");
+            csw.addFile(name);
+            csw.close();
+
+            CompoundFileReader csr = new CompoundFileReader(dir, name + ".cfs");
+            IndexInput expected = dir.openInput(name);
+            IndexInput actual = csr.openInput(name);
+            assertSameStreams(name, expected, actual);
+            assertSameSeekBehavior(name, expected, actual);
+            expected.close();
+            actual.close();
+            csr.close();
+        }
+    }
+
+
+    /** This test creates compound file based on two files.
+     *
+     */
+    public void testTwoFiles() throws IOException {
+        createSequenceFile(dir, "d1", (byte) 0, 15);
+        createSequenceFile(dir, "d2", (byte) 0, 114);
+
+        CompoundFileWriter csw = new CompoundFileWriter(dir, "d.csf");
+        csw.addFile("d1");
+        csw.addFile("d2");
+        csw.close();
+
+        CompoundFileReader csr = new CompoundFileReader(dir, "d.csf");
+        IndexInput expected = dir.openInput("d1");
+        IndexInput actual = csr.openInput("d1");
+        assertSameStreams("d1", expected, actual);
+        assertSameSeekBehavior("d1", expected, actual);
+        expected.close();
+        actual.close();
+
+        expected = dir.openInput("d2");
+        actual = csr.openInput("d2");
+        assertSameStreams("d2", expected, actual);
+        assertSameSeekBehavior("d2", expected, actual);
+        expected.close();
+        actual.close();
+        csr.close();
+    }
+
+    /** This test creates a compound file based on a large number of files of
+     *  various lengths. The file content is generated randomly. The sizes range
+     *  from 0 to 1Mb. Some of the sizes are selected to test the buffering
+     *  logic in the file reading code. For this the chunk variable is set to
+     *  the length of the buffer used internally by the compound file logic.
+     */
+    public void testRandomFiles() throws IOException {
+        // Setup the test segment
+        String segment = "test";
+        int chunk = 1024; // internal buffer size used by the stream
+        createRandomFile(dir, segment + ".zero", 0);
+        createRandomFile(dir, segment + ".one", 1);
+        createRandomFile(dir, segment + ".ten", 10);
+        createRandomFile(dir, segment + ".hundred", 100);
+        createRandomFile(dir, segment + ".big1", chunk);
+        createRandomFile(dir, segment + ".big2", chunk - 1);
+        createRandomFile(dir, segment + ".big3", chunk + 1);
+        createRandomFile(dir, segment + ".big4", 3 * chunk);
+        createRandomFile(dir, segment + ".big5", 3 * chunk - 1);
+        createRandomFile(dir, segment + ".big6", 3 * chunk + 1);
+        createRandomFile(dir, segment + ".big7", 1000 * chunk);
+
+        // Setup extraneous files
+        createRandomFile(dir, "onetwothree", 100);
+        createRandomFile(dir, segment + ".notIn", 50);
+        createRandomFile(dir, segment + ".notIn2", 51);
+
+        // Now test
+        CompoundFileWriter csw = new CompoundFileWriter(dir, "test.cfs");
+        final String data[] = new String[] {
+            ".zero", ".one", ".ten", ".hundred", ".big1", ".big2", ".big3",
+            ".big4", ".big5", ".big6", ".big7"
+        };
+        for (int i=0; i<data.length; i++) {
+            csw.addFile(segment + data[i]);
+        }
+        csw.close();
+
+        CompoundFileReader csr = new CompoundFileReader(dir, "test.cfs");
+        for (int i=0; i<data.length; i++) {
+            IndexInput check = dir.openInput(segment + data[i]);
+            IndexInput test = csr.openInput(segment + data[i]);
+            assertSameStreams(data[i], check, test);
+            assertSameSeekBehavior(data[i], check, test);
+            test.close();
+            check.close();
+        }
+        csr.close();
+    }
+
+
+    /** Setup a larger compound file with a number of components, each of
+     *  which is a sequential file (so that we can easily tell that we are
+     *  reading in the right byte). The method sets up 20 files - f0 to f19,
+     *  each 2000 bytes in size.
+     */
+    private void setUp_2() throws IOException {
+        CompoundFileWriter cw = new CompoundFileWriter(dir, "f.comp");
+        for (int i=0; i<20; i++) {
+            createSequenceFile(dir, "f" + i, (byte) 0, 2000);
+            cw.addFile("f" + i);
+        }
+        cw.close();
+    }
+
+
+    public void testReadAfterClose() throws IOException {
+        demo_FSIndexInputBug(dir, "test");
+    }
+
+    private void demo_FSIndexInputBug(Directory fsdir, String file)
+    throws IOException
+    {
+        // Setup the test file - we need more than 1024 bytes
+        IndexOutput os = fsdir.createOutput(file);
+        for(int i=0; i<2000; i++) {
+            os.writeByte((byte) i);
+        }
+        os.close();
+
+        IndexInput in = fsdir.openInput(file);
+
+        // This read primes the buffer in IndexInput
+        in.readByte();
+
+        // Close the file
+        in.close();
+
+        // ERROR: this call should fail, but succeeds because the buffer
+        // is still filled
+        in.readByte();
+
+        // ERROR: this call should fail, but succeeds for some reason as well
+        in.seek(1099);
+
+        try {
+            // OK: this call correctly fails. We are now past the 1024 internal
+            // buffer, so an actual IO is attempted, which fails
+            in.readByte();
+            fail("expected readByte() to throw exception");
+        } catch (IOException e) {
+          // expected exception
+        }
+    }
+
+
+    static boolean isCSIndexInput(IndexInput is) {
+        return is instanceof CompoundFileReader.CSIndexInput;
+    }
+
+    static boolean isCSIndexInputOpen(IndexInput is) throws IOException {
+        if (isCSIndexInput(is)) {
+            CompoundFileReader.CSIndexInput cis =
+            (CompoundFileReader.CSIndexInput) is;
+
+            return _TestHelper.isSimpleFSIndexInputOpen(cis.base);
+        } else {
+            return false;
+        }
+    }
+
+
+    public void testClonedStreamsClosing() throws IOException {
+        setUp_2();
+        CompoundFileReader cr = new CompoundFileReader(dir, "f.comp");
+
+        // basic clone
+        IndexInput expected = dir.openInput("f11");
+
+        // this test only works for FSIndexInput
+        assertTrue(_TestHelper.isSimpleFSIndexInput(expected));
+        assertTrue(_TestHelper.isSimpleFSIndexInputOpen(expected));
+
+        IndexInput one = cr.openInput("f11");
+        assertTrue(isCSIndexInputOpen(one));
+
+        IndexInput two = (IndexInput) one.clone();
+        assertTrue(isCSIndexInputOpen(two));
+
+        assertSameStreams("basic clone one", expected, one);
+        expected.seek(0);
+        assertSameStreams("basic clone two", expected, two);
+
+        // Now close the first stream
+        one.close();
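+        // closing one slice does not close the shared underlying stream; it stays open until the CompoundFileReader itself is closed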
+        assertTrue("Only close when cr is closed", isCSIndexInputOpen(one));
+
+        // The following should really fail, since we shouldn't expect to
+        // access a file once close has been called on it (regardless of
+        // buffering and/or clone magic)
+        expected.seek(0);
+        two.seek(0);
+        assertSameStreams("basic clone two/2", expected, two);
+
+
+        // Now close the compound reader
+        cr.close();
+        assertFalse("Now closed one", isCSIndexInputOpen(one));
+        assertFalse("Now closed two", isCSIndexInputOpen(two));
+
+        // The following may also fail since the compound stream is closed
+        expected.seek(0);
+        two.seek(0);
+        //assertSameStreams("basic clone two/3", expected, two);
+
+
+        // Now close the second clone
+        two.close();
+        expected.seek(0);
+        two.seek(0);
+        //assertSameStreams("basic clone two/4", expected, two);
+
+        expected.close();
+    }
+
+
+    /** This test opens two files from a compound stream and verifies that
+     *  their file positions are independent of each other.
+     */
+    public void testRandomAccess() throws IOException {
+        setUp_2();
+        CompoundFileReader cr = new CompoundFileReader(dir, "f.comp");
+
+        // Open two files
+        IndexInput e1 = dir.openInput("f11");
+        IndexInput e2 = dir.openInput("f3");
+
+        IndexInput a1 = cr.openInput("f11");
+        IndexInput a2 = cr.openInput("f3");
+
+        // Seek the first pair
+        e1.seek(100);
+        a1.seek(100);
+        assertEquals(100, e1.getFilePointer());
+        assertEquals(100, a1.getFilePointer());
+        byte be1 = e1.readByte();
+        byte ba1 = a1.readByte();
+        assertEquals(be1, ba1);
+
+        // Now seek the second pair
+        e2.seek(1027);
+        a2.seek(1027);
+        assertEquals(1027, e2.getFilePointer());
+        assertEquals(1027, a2.getFilePointer());
+        byte be2 = e2.readByte();
+        byte ba2 = a2.readByte();
+        assertEquals(be2, ba2);
+
+        // Now make sure the first one didn't move
+        assertEquals(101, e1.getFilePointer());
+        assertEquals(101, a1.getFilePointer());
+        be1 = e1.readByte();
+        ba1 = a1.readByte();
+        assertEquals(be1, ba1);
+
+        // Now move the first one again, past the buffer length
+        e1.seek(1910);
+        a1.seek(1910);
+        assertEquals(1910, e1.getFilePointer());
+        assertEquals(1910, a1.getFilePointer());
+        be1 = e1.readByte();
+        ba1 = a1.readByte();
+        assertEquals(be1, ba1);
+
+        // Now make sure the second set didn't move
+        assertEquals(1028, e2.getFilePointer());
+        assertEquals(1028, a2.getFilePointer());
+        be2 = e2.readByte();
+        ba2 = a2.readByte();
+        assertEquals(be2, ba2);
+
+        // Move the second set back, again cross the buffer size
+        e2.seek(17);
+        a2.seek(17);
+        assertEquals(17, e2.getFilePointer());
+        assertEquals(17, a2.getFilePointer());
+        be2 = e2.readByte();
+        ba2 = a2.readByte();
+        assertEquals(be2, ba2);
+
+        // Finally, make sure the first set didn't move
+        assertEquals(1911, e1.getFilePointer());
+        assertEquals(1911, a1.getFilePointer());
+        be1 = e1.readByte();
+        ba1 = a1.readByte();
+        assertEquals(be1, ba1);
+
+        e1.close();
+        e2.close();
+        a1.close();
+        a2.close();
+        cr.close();
+    }
+
+    /** This test opens two files from a compound stream and verifies that
+     *  their file positions are independent of each other.
+     */
+    public void testRandomAccessClones() throws IOException {
+        setUp_2();
+        CompoundFileReader cr = new CompoundFileReader(dir, "f.comp");
+
+        // Open two files
+        IndexInput e1 = cr.openInput("f11");
+        IndexInput e2 = cr.openInput("f3");
+
+        IndexInput a1 = (IndexInput) e1.clone();
+        IndexInput a2 = (IndexInput) e2.clone();
+
+        // Seek the first pair
+        e1.seek(100);
+        a1.seek(100);
+        assertEquals(100, e1.getFilePointer());
+        assertEquals(100, a1.getFilePointer());
+        byte be1 = e1.readByte();
+        byte ba1 = a1.readByte();
+        assertEquals(be1, ba1);
+
+        // Now seek the second pair
+        e2.seek(1027);
+        a2.seek(1027);
+        assertEquals(1027, e2.getFilePointer());
+        assertEquals(1027, a2.getFilePointer());
+        byte be2 = e2.readByte();
+        byte ba2 = a2.readByte();
+        assertEquals(be2, ba2);
+
+        // Now make sure the first one didn't move
+        assertEquals(101, e1.getFilePointer());
+        assertEquals(101, a1.getFilePointer());
+        be1 = e1.readByte();
+        ba1 = a1.readByte();
+        assertEquals(be1, ba1);
+
+        // Now move the first one again, past the buffer length
+        e1.seek(1910);
+        a1.seek(1910);
+        assertEquals(1910, e1.getFilePointer());
+        assertEquals(1910, a1.getFilePointer());
+        be1 = e1.readByte();
+        ba1 = a1.readByte();
+        assertEquals(be1, ba1);
+
+        // Now make sure the second set didn't move
+        assertEquals(1028, e2.getFilePointer());
+        assertEquals(1028, a2.getFilePointer());
+        be2 = e2.readByte();
+        ba2 = a2.readByte();
+        assertEquals(be2, ba2);
+
+        // Move the second set back, again cross the buffer size
+        e2.seek(17);
+        a2.seek(17);
+        assertEquals(17, e2.getFilePointer());
+        assertEquals(17, a2.getFilePointer());
+        be2 = e2.readByte();
+        ba2 = a2.readByte();
+        assertEquals(be2, ba2);
+
+        // Finally, make sure the first set didn't move
+        assertEquals(1911, e1.getFilePointer());
+        assertEquals(1911, a1.getFilePointer());
+        be1 = e1.readByte();
+        ba1 = a1.readByte();
+        assertEquals(be1, ba1);
+
+        e1.close();
+        e2.close();
+        a1.close();
+        a2.close();
+        cr.close();
+    }
+
+
+    public void testFileNotFound() throws IOException {
+        setUp_2();
+        CompoundFileReader cr = new CompoundFileReader(dir, "f.comp");
+
+        // Opening a non-existent file should throw an IOException
+        try {
+            cr.openInput("bogus");
+            fail("expected IOException for missing file");
+
+        } catch (IOException e) {
+            /* success */
+            //System.out.println("SUCCESS: File Not Found: " + e);
+        }
+
+        cr.close();
+    }
+
+
+    public void testReadPastEOF() throws IOException {
+        setUp_2();
+        CompoundFileReader cr = new CompoundFileReader(dir, "f.comp");
+        IndexInput is = cr.openInput("f2");
+        is.seek(is.length() - 10);
+        byte b[] = new byte[100];
+        is.readBytes(b, 0, 10);
+
+        try {
+            is.readByte();
+            fail("Single byte read past end of file");
+        } catch (IOException e) {
+            /* success */
+            //System.out.println("SUCCESS: single byte read past end of file: " + e);
+        }
+
+        is.seek(is.length() - 10);
+        try {
+            is.readBytes(b, 0, 50);
+            fail("Block read past end of file");
+        } catch (IOException e) {
+            /* success */
+            //System.out.println("SUCCESS: block read past end of file: " + e);
+        }
+
+        is.close();
+        cr.close();
+    }
+
+    /** This test verifies that writes larger than the output buffer
+     * correctly increment the file pointer.
+     */
+    public void testLargeWrites() throws IOException {
+        IndexOutput os = dir.createOutput("testBufferStart.txt");
+
+        byte[] largeBuf = new byte[2048];
+        for (int i=0; i<largeBuf.length; i++) {
+            largeBuf[i] = (byte) (Math.random() * 256);
+        }
+
+        long currentPos = os.getFilePointer();
+        os.writeBytes(largeBuf, largeBuf.length);
+
+        try {
+            assertEquals(currentPos + largeBuf.length, os.getFilePointer());
+        } finally {
+            os.close();
+        }
+
+    }
+    
+   public void testAddExternalFile() throws IOException {
+       createSequenceFile(dir, "d1", (byte) 0, 15);
+
+       Directory newDir = newDirectory();
+       CompoundFileWriter csw = new CompoundFileWriter(newDir, "d.csf");
+       csw.addFile("d1", dir);
+       csw.close();
+
+       CompoundFileReader csr = new CompoundFileReader(newDir, "d.csf");
+       IndexInput expected = dir.openInput("d1");
+       IndexInput actual = csr.openInput("d1");
+       assertSameStreams("d1", expected, actual);
+       assertSameSeekBehavior("d1", expected, actual);
+       expected.close();
+       actual.close();
+       csr.close();
+       
+       newDir.close();
+   }
+
+  // Make sure we don't somehow use more than 1 descriptor
+  // when reading a CFS with many subs:
+  public void testManySubFiles() throws IOException {
+
+    final Directory d = newFSDirectory(_TestUtil.getTempDir("CFSManySubFiles"));
+    final int FILE_COUNT = 10000;
+
+    for(int fileIdx=0;fileIdx<FILE_COUNT;fileIdx++) {
+      IndexOutput out = d.createOutput("file." + fileIdx);
+      out.writeByte((byte) fileIdx);
+      out.close();
+    }
+    
+    final CompoundFileWriter cfw = new CompoundFileWriter(d, "c.cfs");
+    for(int fileIdx=0;fileIdx<FILE_COUNT;fileIdx++) {
+      cfw.addFile("file." + fileIdx);
+    }
+    cfw.close();
+
+    final IndexInput[] ins = new IndexInput[FILE_COUNT];
+    final CompoundFileReader cfr = new CompoundFileReader(d, "c.cfs");
+    for(int fileIdx=0;fileIdx<FILE_COUNT;fileIdx++) {
+      ins[fileIdx] = cfr.openInput("file." + fileIdx);
+    }
+
+    for(int fileIdx=0;fileIdx<FILE_COUNT;fileIdx++) {
+      assertEquals((byte) fileIdx, ins[fileIdx].readByte());
+    }
+
+    for(int fileIdx=0;fileIdx<FILE_COUNT;fileIdx++) {
+      ins[fileIdx].close();
+    }
+    cfr.close();
+    d.close();
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java b/lucene/backwards/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java
new file mode 100644
index 0000000..90a318f
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java
@@ -0,0 +1,252 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.store.MockDirectoryWrapper;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+
+import org.apache.lucene.util.LuceneTestCase;
+import java.io.IOException;
+
+public class TestConcurrentMergeScheduler extends LuceneTestCase {
+  
+  private static class FailOnlyOnFlush extends MockDirectoryWrapper.Failure {
+    boolean doFail;
+    boolean hitExc;
+
+    @Override
+    public void setDoFail() {
+      this.doFail = true;
+      hitExc = false;
+    }
+    @Override
+    public void clearDoFail() {
+      this.doFail = false;
+    }
+
+    @Override
+    public void eval(MockDirectoryWrapper dir)  throws IOException {
+      if (doFail && (Thread.currentThread().getName().equals("main") 
+          || Thread.currentThread().getName().equals("Main Thread"))) {
+        boolean isDoFlush = false;
+        boolean isClose = false;
+        StackTraceElement[] trace = new Exception().getStackTrace();
+        for (int i = 0; i < trace.length; i++) {
+          if ("doFlush".equals(trace[i].getMethodName())) {
+            isDoFlush = true;
+          }
+          if ("close".equals(trace[i].getMethodName())) {
+            isClose = true;
+          }
+        }
+        if (isDoFlush && !isClose && random.nextBoolean()) {
+          hitExc = true;
+          throw new IOException(Thread.currentThread().getName() + ": now failing during flush");
+        }
+      }
+    }
+  }
+
+  // Make sure running BG merges still work fine even when
+  // we are hitting exceptions during flushing.
+  public void testFlushExceptions() throws IOException {
+    MockDirectoryWrapper directory = newDirectory();
+    FailOnlyOnFlush failure = new FailOnlyOnFlush();
+    directory.failOn(failure);
+
+    IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(2));
+    writer.setInfoStream(VERBOSE ? System.out : null);
+    Document doc = new Document();
+    Field idField = newField("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
+    doc.add(idField);
+    int extraCount = 0;
+
+    for(int i=0;i<10;i++) {
+      if (VERBOSE) {
+        System.out.println("TEST: iter=" + i);
+      }
+
+      for(int j=0;j<20;j++) {
+        idField.setValue(Integer.toString(i*20+j));
+        writer.addDocument(doc);
+      }
+
+      // must cycle here because sometimes the merge flushes
+      // the doc we just added and so there's nothing to
+      // flush, and we don't hit the exception
+      while(true) {
+        writer.addDocument(doc);
+        failure.setDoFail();
+        try {
+          writer.flush(true, true);
+          if (failure.hitExc) {
+            fail("failed to hit IOException");
+          }
+          extraCount++;
+        } catch (IOException ioe) {
+          if (VERBOSE) {
+            ioe.printStackTrace(System.out);
+          }
+          failure.clearDoFail();
+          break;
+        }
+      }
+      assertEquals(20*(i+1)+extraCount, writer.numDocs());
+    }
+
+    writer.close();
+    IndexReader reader = IndexReader.open(directory, true);
+    assertEquals(200+extraCount, reader.numDocs());
+    reader.close();
+    directory.close();
+  }
+
+  // Test that deletes committed after a merge started and
+  // before it finishes, are correctly merged back:
+  public void testDeleteMerging() throws IOException {
+    MockDirectoryWrapper directory = newDirectory();
+
+    LogDocMergePolicy mp = new LogDocMergePolicy();
+    // Force degenerate merging so we can get a mix of
+    // merging of segments with and without deletes at the
+    // start:
+    mp.setMinMergeDocs(1000);
+    IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig(
+        TEST_VERSION_CURRENT, new MockAnalyzer(random))
+        .setMergePolicy(mp));
+    writer.setInfoStream(VERBOSE ? System.out : null);
+
+    Document doc = new Document();
+    Field idField = newField("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
+    doc.add(idField);
+    for(int i=0;i<10;i++) {
+      if (VERBOSE) {
+        System.out.println("\nTEST: cycle");
+      }
+      for(int j=0;j<100;j++) {
+        idField.setValue(Integer.toString(i*100+j));
+        writer.addDocument(doc);
+      }
+
+      int delID = i;
+      while(delID < 100*(1+i)) {
+        if (VERBOSE) {
+          System.out.println("TEST: del " + delID);
+        }
+        writer.deleteDocuments(new Term("id", ""+delID));
+        delID += 10;
+      }
+
+      writer.commit();
+    }
+
+    writer.close();
+    IndexReader reader = IndexReader.open(directory, true);
+    // Verify that we did not lose any deletes...
+    assertEquals(450, reader.numDocs());
+    reader.close();
+    directory.close();
+  }
+
+  public void testNoExtraFiles() throws IOException {
+    MockDirectoryWrapper directory = newDirectory();
+    IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig(
+        TEST_VERSION_CURRENT, new MockAnalyzer(random))
+        .setMaxBufferedDocs(2));
+    writer.setInfoStream(VERBOSE ? System.out : null);
+
+    for(int iter=0;iter<7;iter++) {
+      if (VERBOSE) {
+        System.out.println("TEST: iter=" + iter);
+      }
+
+      for(int j=0;j<21;j++) {
+        Document doc = new Document();
+        doc.add(newField("content", "a b c", Field.Store.NO, Field.Index.ANALYZED));
+        writer.addDocument(doc);
+      }
+        
+      writer.close();
+      TestIndexWriter.assertNoUnreferencedFiles(directory, "testNoExtraFiles");
+
+      // Reopen
+      writer = new IndexWriter(directory, newIndexWriterConfig(
+          TEST_VERSION_CURRENT, new MockAnalyzer(random))
+          .setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(2));
+      writer.setInfoStream(VERBOSE ? System.out : null);
+    }
+
+    writer.close();
+
+    directory.close();
+  }
+
+  public void testNoWaitClose() throws IOException {
+    MockDirectoryWrapper directory = newDirectory();
+    Document doc = new Document();
+    Field idField = newField("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
+    doc.add(idField);
+
+    IndexWriter writer = new IndexWriter(
+        directory,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+            setMaxBufferedDocs(2).
+            setMergePolicy(newLogMergePolicy(100))
+    );
+
+    for(int iter=0;iter<10;iter++) {
+
+      for(int j=0;j<201;j++) {
+        idField.setValue(Integer.toString(iter*201+j));
+        writer.addDocument(doc);
+      }
+
+      int delID = iter*201;
+      for(int j=0;j<20;j++) {
+        writer.deleteDocuments(new Term("id", Integer.toString(delID)));
+        delID += 5;
+      }
+
+      // Force a bunch of merge threads to kick off so we
+      // stress out aborting them on close:
+      ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(3);
+      writer.addDocument(doc);
+      writer.commit();
+
+      writer.close(false);
+
+      IndexReader reader = IndexReader.open(directory, true);
+      assertEquals((1+iter)*182, reader.numDocs());
+      reader.close();
+
+      // Reopen
+      writer = new IndexWriter(
+          directory,
+          newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+              setOpenMode(OpenMode.APPEND).
+              setMergePolicy(newLogMergePolicy(100))
+      );
+    }
+    writer.close();
+
+    directory.close();
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestCrash.java b/lucene/backwards/src/test/org/apache/lucene/index/TestCrash.java
new file mode 100644
index 0000000..d8062bc
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestCrash.java
@@ -0,0 +1,209 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.Random;
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.MockDirectoryWrapper;
+import org.apache.lucene.store.NoLockFactory;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+
+public class TestCrash extends LuceneTestCase {
+
+  private IndexWriter initIndex(Random random, boolean initialCommit) throws IOException {
+    return initIndex(random, newDirectory(), initialCommit);
+  }
+
+  private IndexWriter initIndex(Random random, MockDirectoryWrapper dir, boolean initialCommit) throws IOException {
+    dir.setLockFactory(NoLockFactory.getNoLockFactory());
+
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
+        TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(10)
+        .setMergeScheduler(new ConcurrentMergeScheduler())
+        .setMergePolicy(newLogMergePolicy()));
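+    // suppress exceptions thrown on background merge threads; the simulated crashes below can make merges fail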
+    ((ConcurrentMergeScheduler) writer.getConfig().getMergeScheduler()).setSuppressExceptions();
+    if (initialCommit) {
+      writer.commit();
+    }
+    
+    Document doc = new Document();
+    doc.add(newField("content", "aaa", Field.Store.YES, Field.Index.ANALYZED));
+    doc.add(newField("id", "0", Field.Store.YES, Field.Index.ANALYZED));
+    for(int i=0;i<157;i++)
+      writer.addDocument(doc);
+
+    return writer;
+  }
+
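+  /** Simulates a machine crash: waits for any running merges,
+   *  drops all unsynced writes from the MockDirectoryWrapper,
+   *  waits for merges again, then clears the crash flag so the
+   *  directory can be reopened. */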
+  private void crash(final IndexWriter writer) throws IOException {
+    final MockDirectoryWrapper dir = (MockDirectoryWrapper) writer.getDirectory();
+    ConcurrentMergeScheduler cms = (ConcurrentMergeScheduler) writer.getConfig().getMergeScheduler();
+    cms.sync();
+    dir.crash();
+    cms.sync();
+    dir.clearCrash();
+  }
+
+  public void testCrashWhileIndexing() throws IOException {
+    // This test relies on being able to open a reader before any commit
+    // happened, so we must create an initial commit just to allow that, but
+    // before any documents were added.
+    IndexWriter writer = initIndex(random, true);
+    Directory dir = writer.getDirectory();
+    crash(writer);
+    IndexReader reader = IndexReader.open(dir, false);
+    assertTrue(reader.numDocs() < 157);
+    reader.close();
+    dir.close();
+  }
+
+  public void testWriterAfterCrash() throws IOException {
+    // This test relies on being able to open a reader before any commit
+    // happened, so we must create an initial commit just to allow that, but
+    // before any documents were added.
+    IndexWriter writer = initIndex(random, true);
+    MockDirectoryWrapper dir = (MockDirectoryWrapper) writer.getDirectory();
+    dir.setPreventDoubleWrite(false);
+    crash(writer);
+    writer = initIndex(random, dir, false);
+    writer.close();
+
+    IndexReader reader = IndexReader.open(dir, false);
+    assertTrue(reader.numDocs() < 314);
+    reader.close();
+    dir.close();
+  }
+
+  public void testCrashAfterReopen() throws IOException {
+    IndexWriter writer = initIndex(random, false);
+    MockDirectoryWrapper dir = (MockDirectoryWrapper) writer.getDirectory();
+    writer.close();
+    writer = initIndex(random, dir, false);
+    assertEquals(314, writer.maxDoc());
+    crash(writer);
+
+    /*
+    System.out.println("\n\nTEST: open reader");
+    String[] l = dir.list();
+    Arrays.sort(l);
+    for(int i=0;i<l.length;i++)
+      System.out.println("file " + i + " = " + l[i] + " " +
+    dir.fileLength(l[i]) + " bytes");
+    */
+
+    IndexReader reader = IndexReader.open(dir, false);
+    assertTrue(reader.numDocs() >= 157);
+    reader.close();
+    dir.close();
+  }
+
+  public void testCrashAfterClose() throws IOException {
+    
+    IndexWriter writer = initIndex(random, false);
+    MockDirectoryWrapper dir = (MockDirectoryWrapper) writer.getDirectory();
+
+    writer.close();
+    dir.crash();
+
+    /*
+    String[] l = dir.list();
+    Arrays.sort(l);
+    for(int i=0;i<l.length;i++)
+      System.out.println("file " + i + " = " + l[i] + " " + dir.fileLength(l[i]) + " bytes");
+    */
+
+    IndexReader reader = IndexReader.open(dir, false);
+    assertEquals(157, reader.numDocs());
+    reader.close();
+    dir.close();
+  }
+
+  public void testCrashAfterCloseNoWait() throws IOException {
+    
+    IndexWriter writer = initIndex(random, false);
+    MockDirectoryWrapper dir = (MockDirectoryWrapper) writer.getDirectory();
+
+    writer.close(false);
+
+    dir.crash();
+
+    /*
+    String[] l = dir.list();
+    Arrays.sort(l);
+    for(int i=0;i<l.length;i++)
+      System.out.println("file " + i + " = " + l[i] + " " + dir.fileLength(l[i]) + " bytes");
+    */
+    IndexReader reader = IndexReader.open(dir, false);
+    assertEquals(157, reader.numDocs());
+    reader.close();
+    dir.close();
+  }
+
+  public void testCrashReaderDeletes() throws IOException {
+    
+    IndexWriter writer = initIndex(random, false);
+    MockDirectoryWrapper dir = (MockDirectoryWrapper) writer.getDirectory();
+
+    writer.close(false);
+    IndexReader reader = IndexReader.open(dir, false);
+    reader.deleteDocument(3);
+
+    dir.crash();
+
+    /*
+    String[] l = dir.list();
+    Arrays.sort(l);
+    for(int i=0;i<l.length;i++)
+      System.out.println("file " + i + " = " + l[i] + " " + dir.fileLength(l[i]) + " bytes");
+    */
+    reader = IndexReader.open(dir, false);
+    assertEquals(157, reader.numDocs());
+    reader.close();
+    dir.clearCrash();
+    dir.close();
+  }
+
+  public void testCrashReaderDeletesAfterClose() throws IOException {
+    
+    IndexWriter writer = initIndex(random, false);
+    MockDirectoryWrapper dir = (MockDirectoryWrapper) writer.getDirectory();
+
+    writer.close(false);
+    IndexReader reader = IndexReader.open(dir, false);
+    reader.deleteDocument(3);
+    reader.close();
+
+    dir.crash();
+
+    /*
+    String[] l = dir.list();
+    Arrays.sort(l);
+    for(int i=0;i<l.length;i++)
+      System.out.println("file " + i + " = " + l[i] + " " + dir.fileLength(l[i]) + " bytes");
+    */
+    reader = IndexReader.open(dir, false);
+    assertEquals(156, reader.numDocs());
+    reader.close();
+    dir.close();
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestDeletionPolicy.java b/lucene/backwards/src/test/org/apache/lucene/index/TestDeletionPolicy.java
new file mode 100644
index 0000000..cb0a7b8
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestDeletionPolicy.java
@@ -0,0 +1,849 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.Collection;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.WhitespaceAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+
+/*
+  Test custom IndexDeletionPolicy implementations: verify that
+  commit points are kept or deleted exactly as each policy dictates.
+*/
+
+public class TestDeletionPolicy extends LuceneTestCase {
+  
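+  // Asserts that commits are listed oldest first: generations and
+  // versions strictly increase, timestamps never go backwards, and
+  // each generation matches its segments file name.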
+  private void verifyCommitOrder(List<? extends IndexCommit> commits) throws IOException {
+    final IndexCommit firstCommit =  commits.get(0);
+    long last = SegmentInfos.generationFromSegmentsFileName(firstCommit.getSegmentsFileName());
+    assertEquals(last, firstCommit.getGeneration());
+    long lastVersion = firstCommit.getVersion();
+    long lastTimestamp = firstCommit.getTimestamp();
+    for(int i=1;i<commits.size();i++) {
+      final IndexCommit commit =  commits.get(i);
+      long now = SegmentInfos.generationFromSegmentsFileName(commit.getSegmentsFileName());
+      long nowVersion = commit.getVersion();
+      long nowTimestamp = commit.getTimestamp();
+      assertTrue("SegmentInfos commits are out-of-order", now > last);
+      assertTrue("SegmentInfos versions are out-of-order", nowVersion > lastVersion);
+      assertTrue("SegmentInfos timestamps are out-of-order: now=" + nowTimestamp + " vs last=" + lastTimestamp, nowTimestamp >= lastTimestamp);
+      assertEquals(now, commit.getGeneration());
+      last = now;
+      lastVersion = nowVersion;
+      lastTimestamp = nowTimestamp;
+    }
+  }
+
+  class KeepAllDeletionPolicy implements IndexDeletionPolicy {
+    int numOnInit;
+    int numOnCommit;
+    Directory dir;
+    public void onInit(List<? extends IndexCommit> commits) throws IOException {
+      verifyCommitOrder(commits);
+      numOnInit++;
+    }
+    public void onCommit(List<? extends IndexCommit> commits) throws IOException {
+      IndexCommit lastCommit =  commits.get(commits.size()-1);
+      IndexReader r = IndexReader.open(dir, true);
+      assertEquals("lastCommit.isOptimized()=" + lastCommit.isOptimized() + " vs IndexReader.isOptimized=" + r.isOptimized(), r.isOptimized(), lastCommit.isOptimized());
+      r.close();
+      verifyCommitOrder(commits);
+      numOnCommit++;
+    }
+  }
+
+  /**
+   * This is useful for adding to a big index when you know
+   * readers are not using it.
+   */
+  class KeepNoneOnInitDeletionPolicy implements IndexDeletionPolicy {
+    int numOnInit;
+    int numOnCommit;
+    public void onInit(List<? extends IndexCommit> commits) throws IOException {
+      verifyCommitOrder(commits);
+      numOnInit++;
+      // On init, delete all commit points:
+      for (final IndexCommit commit : commits) {
+        commit.delete();
+        assertTrue(commit.isDeleted());
+      }
+    }
+    public void onCommit(List<? extends IndexCommit> commits) throws IOException {
+      verifyCommitOrder(commits);
+      int size = commits.size();
+      // Delete all but last one:
+      for(int i=0;i<size-1;i++) {
+        ((IndexCommit) commits.get(i)).delete();
+      }
+      numOnCommit++;
+    }
+  }
+
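+  /** Deletion policy that keeps only the most recent numToKeep
+   *  commits, counting how often onInit/onCommit are called and how
+   *  many commit points it deletes. */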
+  class KeepLastNDeletionPolicy implements IndexDeletionPolicy {
+    int numOnInit;
+    int numOnCommit;
+    int numToKeep;
+    int numDelete;
+    Set<String> seen = new HashSet<String>();
+
+    public KeepLastNDeletionPolicy(int numToKeep) {
+      this.numToKeep = numToKeep;
+    }
+
+    public void onInit(List<? extends IndexCommit> commits) throws IOException {
+      if (VERBOSE) {
+        System.out.println("TEST: onInit");
+      }
+      verifyCommitOrder(commits);
+      numOnInit++;
+      // prune old commits even on init, but don't count it as an onCommit call:
+      doDeletes(commits, false);
+    }
+
+    public void onCommit(List<? extends IndexCommit> commits) throws IOException {
+      if (VERBOSE) {
+        System.out.println("TEST: onCommit");
+      }
+      verifyCommitOrder(commits);
+      doDeletes(commits, true);
+    }
+    
+    private void doDeletes(List<? extends IndexCommit> commits, boolean isCommit) {
+
+      // Assert that we really are only called for each new
+      // commit:
+      if (isCommit) {
+        String fileName = ((IndexCommit) commits.get(commits.size()-1)).getSegmentsFileName();
+        if (seen.contains(fileName)) {
+          throw new RuntimeException("onCommit was called twice on the same commit point: " + fileName);
+        }
+        seen.add(fileName);
+        numOnCommit++;
+      }
+      int size = commits.size();
+      for(int i=0;i<size-numToKeep;i++) {
+        ((IndexCommit) commits.get(i)).delete();
+        numDelete++;
+      }
+    }
+  }
+
+  /*
+   * Delete a commit only when it has been obsoleted by N
+   * seconds.
+   */
+  class ExpirationTimeDeletionPolicy implements IndexDeletionPolicy {
+
+    Directory dir;
+    double expirationTimeSeconds;
+    int numDelete;
+
+    public ExpirationTimeDeletionPolicy(Directory dir, double seconds) {
+      this.dir = dir;
+      this.expirationTimeSeconds = seconds;
+    }
+
+    public void onInit(List<? extends IndexCommit> commits) throws IOException {
+      verifyCommitOrder(commits);
+      onCommit(commits);
+    }
+
+    public void onCommit(List<? extends IndexCommit> commits) throws IOException {
+      verifyCommitOrder(commits);
+
+      IndexCommit lastCommit = commits.get(commits.size()-1);
+
+      // Any commit older than expireTime should be deleted:
+      double expireTime = dir.fileModified(lastCommit.getSegmentsFileName())/1000.0 - expirationTimeSeconds;
+
+      for (final IndexCommit commit : commits) {
+        double modTime = dir.fileModified(commit.getSegmentsFileName())/1000.0;
+        if (commit != lastCommit && modTime < expireTime) {
+          commit.delete();
+          numDelete += 1;
+        }
+      }
+    }
+  }
+
+  /*
+   * Test "by time expiration" deletion policy:
+   */
+  public void testExpirationTimeDeletionPolicy() throws IOException, InterruptedException {
+
+    final double SECONDS = 2.0;
+
+    Directory dir = newDirectory();
+    ExpirationTimeDeletionPolicy policy = new ExpirationTimeDeletionPolicy(dir, SECONDS);
+    IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT,
+        new MockAnalyzer(random))
+        .setIndexDeletionPolicy(policy);
+    MergePolicy mp = conf.getMergePolicy();
+    if (mp instanceof LogMergePolicy) {
+      setUseCompoundFile(mp, true);
+    }
+    IndexWriter writer = new IndexWriter(dir, conf);
+    writer.close();
+
+    final int ITER = 9;
+
+    long lastDeleteTime = 0;
+    for(int i=0;i<ITER;i++) {
+      // Record last time when writer performed deletes of
+      // past commits
+      lastDeleteTime = System.currentTimeMillis();
+      conf = newIndexWriterConfig(TEST_VERSION_CURRENT,
+          new MockAnalyzer(random)).setOpenMode(
+          OpenMode.APPEND).setIndexDeletionPolicy(policy);
+      mp = conf.getMergePolicy();
+      if (mp instanceof LogMergePolicy) {
+        setUseCompoundFile(mp, true);
+      }
+      writer = new IndexWriter(dir, conf);
+      for(int j=0;j<17;j++) {
+        addDoc(writer);
+      }
+      writer.close();
+
+      if (i < ITER-1) {
+        // Make sure to sleep long enough so that some commit
+        // points will be deleted:
+        Thread.sleep((int) (1000.0*(SECONDS/5.0)));
+      }
+    }
+
+    // First, make sure the policy in fact deleted something:
+    assertTrue("no commits were deleted", policy.numDelete > 0);
+
+    // Then simplistic check: just verify that the
+    // segments_N's that still exist are in fact within SECONDS
+    // seconds of the last one's mod time, and, that I can
+    // open a reader on each:
+    long gen = SegmentInfos.getCurrentSegmentGeneration(dir);
+    
+    String fileName = IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS,
+                                                            "",
+                                                            gen);
+    dir.deleteFile(IndexFileNames.SEGMENTS_GEN);
+
+    boolean oneSecondResolution = true;
+
+    while(gen > 0) {
+      try {
+        IndexReader reader = IndexReader.open(dir, true);
+        reader.close();
+        fileName = IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS,
+                                                         "",
+                                                         gen);
+
+        // if we are on a filesystem that seems to have only
+        // 1 second resolution, allow +1 second in commit
+        // age tolerance:
+        long modTime = dir.fileModified(fileName);
+        oneSecondResolution &= (modTime % 1000) == 0;
+        final long leeway = (long) ((SECONDS + (oneSecondResolution ? 1.0:0.0))*1000);
+
+        assertTrue("commit point was older than " + SECONDS + " seconds (" + (lastDeleteTime - modTime) + " msec) but did not get deleted ", lastDeleteTime - modTime <= leeway);
+      } catch (IOException e) {
+        // OK
+        break;
+      }
+      
+      dir.deleteFile(IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS, "", gen));
+      gen--;
+    }
+
+    dir.close();
+  }
+
+  /*
+   * Test a silly deletion policy that keeps all commits around.
+   */
+  public void testKeepAllDeletionPolicy() throws IOException {
+    for(int pass=0;pass<2;pass++) {
+
+      if (VERBOSE) {
+        System.out.println("TEST: cycle pass=" + pass);
+      }
+
+      boolean useCompoundFile = (pass % 2) != 0;
+
+      // Never deletes a commit
+      KeepAllDeletionPolicy policy = new KeepAllDeletionPolicy();
+
+      Directory dir = newDirectory();
+      policy.dir = dir;
+
+      IndexWriterConfig conf = newIndexWriterConfig(
+          TEST_VERSION_CURRENT, new MockAnalyzer(random))
+          .setIndexDeletionPolicy(policy).setMaxBufferedDocs(10)
+          .setMergeScheduler(new SerialMergeScheduler());
+      MergePolicy mp = conf.getMergePolicy();
+      if (mp instanceof LogMergePolicy) {
+        setUseCompoundFile(mp, useCompoundFile);
+      }
+      IndexWriter writer = new IndexWriter(dir, conf);
+      for(int i=0;i<107;i++) {
+        addDoc(writer);
+      }
+      writer.close();
+
+      final boolean isOptimized;
+      {
+        IndexReader r = IndexReader.open(dir);
+        isOptimized = r.isOptimized();
+        r.close();
+      }
+      if (!isOptimized) {
+        conf = newIndexWriterConfig(TEST_VERSION_CURRENT,
+            new WhitespaceAnalyzer(TEST_VERSION_CURRENT))
+            .setOpenMode(OpenMode.APPEND).setIndexDeletionPolicy(policy);
+        mp = conf.getMergePolicy();
+        if (mp instanceof LogMergePolicy) {
+          setUseCompoundFile(mp, true);
+        }
+        if (VERBOSE) {
+          System.out.println("TEST: open writer for optimize");
+        }
+        writer = new IndexWriter(dir, conf);
+        writer.setInfoStream(VERBOSE ? System.out : null);
+        writer.optimize();
+        writer.close();
+      }
+      assertEquals(isOptimized ? 0:1, policy.numOnInit);
+
+      // One commit from the first close above, plus a second if we
+      // opened another writer to optimize:
+      assertEquals(1 + (isOptimized ? 0:1), policy.numOnCommit);
+
+      // Test listCommits
+      Collection<IndexCommit> commits = IndexReader.listCommits(dir);
+      // one commit per writer close above
+      assertEquals(1 + (isOptimized ? 0:1), commits.size());
+
+      // Make sure we can open a reader on each commit:
+      for (final IndexCommit commit : commits) {
+        IndexReader r = IndexReader.open(commit, null, false);
+        r.close();
+      }
+
+      // Simplistic check: just verify all segments_N's still
+      // exist, and, I can open a reader on each:
+      dir.deleteFile(IndexFileNames.SEGMENTS_GEN);
+      long gen = SegmentInfos.getCurrentSegmentGeneration(dir);
+      while(gen > 0) {
+        IndexReader reader = IndexReader.open(dir, true);
+        reader.close();
+        dir.deleteFile(IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS, "", gen));
+        gen--;
+
+        if (gen > 0) {
+          // Removing that commit point should have orphaned at
+          // least one index file.  Open & close a writer and
+          // assert that it actually removed something:
+          int preCount = dir.listAll().length;
+          writer = new IndexWriter(dir, newIndexWriterConfig(
+              TEST_VERSION_CURRENT,
+              new MockAnalyzer(random)).setOpenMode(
+              OpenMode.APPEND).setIndexDeletionPolicy(policy));
+          writer.close();
+          int postCount = dir.listAll().length;
+          assertTrue(postCount < preCount);
+        }
+      }
+
+      dir.close();
+    }
+  }
+
+  /* Uses KeepAllDeletionPolicy to keep all commits around,
+   * then, opens a new IndexWriter on a previous commit
+   * point. */
+  public void testOpenPriorSnapshot() throws IOException {
+    // Never deletes a commit
+    KeepAllDeletionPolicy policy = new KeepAllDeletionPolicy();
+
+    Directory dir = newDirectory();
+    policy.dir = dir;
+
+    IndexWriter writer = new IndexWriter(
+        dir,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+            setIndexDeletionPolicy(policy).
+            setMaxBufferedDocs(2).
+            setMergePolicy(newLogMergePolicy(10))
+    );
+    for(int i=0;i<10;i++) {
+      addDoc(writer);
+      if ((1+i)%2 == 0)
+        writer.commit();
+    }
+    writer.close();
+
+    Collection<IndexCommit> commits = IndexReader.listCommits(dir);
+    assertEquals(5, commits.size());
+    IndexCommit lastCommit = null;
+    for (final IndexCommit commit : commits) {
+      if (lastCommit == null || commit.getGeneration() > lastCommit.getGeneration())
+        lastCommit = commit;
+    }
+    assertTrue(lastCommit != null);
+
+    // Now add 1 doc and optimize
+    writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT,
+        new MockAnalyzer(random)).setIndexDeletionPolicy(policy));
+    addDoc(writer);
+    assertEquals(11, writer.numDocs());
+    writer.optimize();
+    writer.close();
+
+    assertEquals(6, IndexReader.listCommits(dir).size());
+
+    // Now open writer on the commit just before optimize:
+    writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
+        .setIndexDeletionPolicy(policy).setIndexCommit(lastCommit));
+    assertEquals(10, writer.numDocs());
+
+    // Rollback abandons the writer opened on the prior commit, so the last (optimized) commit survives:
+    writer.rollback();
+
+    IndexReader r = IndexReader.open(dir, true);
+    // Still optimized, still 11 docs
+    assertTrue(r.isOptimized());
+    assertEquals(11, r.numDocs());
+    r.close();
+
+    writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
+        .setIndexDeletionPolicy(policy).setIndexCommit(lastCommit));
+    assertEquals(10, writer.numDocs());
+    // Commits the rollback:
+    writer.close();
+
+    // Now 7 because closing the writer above added another commit
+    assertEquals(7, IndexReader.listCommits(dir).size());
+    
+    r = IndexReader.open(dir, true);
+    // Not optimized because we rolled it back, and now only
+    // 10 docs
+    assertTrue(!r.isOptimized());
+    assertEquals(10, r.numDocs());
+    r.close();
+
+    // Reoptimize
+    writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setIndexDeletionPolicy(policy));
+    writer.optimize();
+    writer.close();
+
+    r = IndexReader.open(dir, true);
+    assertTrue(r.isOptimized());
+    assertEquals(10, r.numDocs());
+    r.close();
+
+    // Now open writer on the commit just before optimize,
+    // but this time keeping only the last commit:
+    writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setIndexCommit(lastCommit));
+    assertEquals(10, writer.numDocs());
+    
+    // Reader still sees optimized index, because writer
+    // opened on the prior commit has not yet committed:
+    r = IndexReader.open(dir, true);
+    assertTrue(r.isOptimized());
+    assertEquals(10, r.numDocs());
+    r.close();
+
+    writer.close();
+
+    // Now reader sees unoptimized index:
+    r = IndexReader.open(dir, true);
+    assertTrue(!r.isOptimized());
+    assertEquals(10, r.numDocs());
+    r.close();
+
+    dir.close();
+  }
+
+
+  /* Test keeping NO commit points.  This is a viable and
+   * useful case, e.g. when you want to build a big index and
+   * you know there are no readers.
+   */
+  public void testKeepNoneOnInitDeletionPolicy() throws IOException {
+    for(int pass=0;pass<2;pass++) {
+
+      boolean useCompoundFile = (pass % 2) != 0;
+
+      KeepNoneOnInitDeletionPolicy policy = new KeepNoneOnInitDeletionPolicy();
+
+      Directory dir = newDirectory();
+
+      IndexWriterConfig conf = newIndexWriterConfig(
+          TEST_VERSION_CURRENT, new MockAnalyzer(random))
+          .setOpenMode(OpenMode.CREATE).setIndexDeletionPolicy(policy)
+          .setMaxBufferedDocs(10);
+      MergePolicy mp = conf.getMergePolicy();
+      if (mp instanceof LogMergePolicy) {
+        setUseCompoundFile(mp, useCompoundFile);
+      }
+      IndexWriter writer = new IndexWriter(dir, conf);
+      for(int i=0;i<107;i++) {
+        addDoc(writer);
+      }
+      writer.close();
+
+      conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
+          .setOpenMode(OpenMode.APPEND).setIndexDeletionPolicy(policy);
+      mp = conf.getMergePolicy();
+      if (mp instanceof LogMergePolicy) {
+        setUseCompoundFile(mp, true);
+      }
+      writer = new IndexWriter(dir, conf);
+      writer.optimize();
+      writer.close();
+
+      assertEquals(1, policy.numOnInit);
+      // If we are not auto committing then there should
+      // be exactly 2 commits (one per close above):
+      assertEquals(2, policy.numOnCommit);
+
+      // Simplistic check: just verify the index is in fact
+      // readable:
+      IndexReader reader = IndexReader.open(dir, true);
+      reader.close();
+
+      dir.close();
+    }
+  }
+
+  /*
+   * Test a deletion policy that keeps last N commits.
+   */
+  public void testKeepLastNDeletionPolicy() throws IOException {
+    final int N = 5;
+
+    for(int pass=0;pass<2;pass++) {
+
+      boolean useCompoundFile = (pass % 2) != 0;
+
+      Directory dir = newDirectory();
+
+      KeepLastNDeletionPolicy policy = new KeepLastNDeletionPolicy(N);
+
+      for(int j=0;j<N+1;j++) {
+        IndexWriterConfig conf = newIndexWriterConfig(
+            TEST_VERSION_CURRENT, new MockAnalyzer(random))
+            .setOpenMode(OpenMode.CREATE).setIndexDeletionPolicy(policy)
+            .setMaxBufferedDocs(10);
+        MergePolicy mp = conf.getMergePolicy();
+        if (mp instanceof LogMergePolicy) {
+          setUseCompoundFile(mp, useCompoundFile);
+        }
+        IndexWriter writer = new IndexWriter(dir, conf);
+        for(int i=0;i<17;i++) {
+          addDoc(writer);
+        }
+        writer.optimize();
+        writer.close();
+      }
+
+      assertTrue(policy.numDelete > 0);
+      assertEquals(N, policy.numOnInit);
+      assertEquals(N+1, policy.numOnCommit);
+
+      // Simplistic check: just verify only the past N segments_N's still
+      // exist, and, I can open a reader on each:
+      dir.deleteFile(IndexFileNames.SEGMENTS_GEN);
+      long gen = SegmentInfos.getCurrentSegmentGeneration(dir);
+      for(int i=0;i<N+1;i++) {
+        try {
+          IndexReader reader = IndexReader.open(dir, true);
+          reader.close();
+          if (i == N) {
+            fail("should have failed on commits prior to last " + N);
+          }
+        } catch (IOException e) {
+          if (i != N) {
+            throw e;
+          }
+        }
+        if (i < N) {
+          dir.deleteFile(IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS, "", gen));
+        }
+        gen--;
+      }
+
+      dir.close();
+    }
+  }
+
+  /*
+   * Test a deletion policy that keeps last N commits
+   * around, with reader doing deletes.
+   */
+  public void testKeepLastNDeletionPolicyWithReader() throws IOException {
+    final int N = 10;
+
+    for(int pass=0;pass<2;pass++) {
+
+      boolean useCompoundFile = (pass % 2) != 0;
+
+      KeepLastNDeletionPolicy policy = new KeepLastNDeletionPolicy(N);
+
+      Directory dir = newDirectory();
+      IndexWriterConfig conf = newIndexWriterConfig(
+          TEST_VERSION_CURRENT, new MockAnalyzer(random))
+        .setOpenMode(OpenMode.CREATE).setIndexDeletionPolicy(policy).setMergePolicy(newLogMergePolicy());
+      MergePolicy mp = conf.getMergePolicy();
+      if (mp instanceof LogMergePolicy) {
+        setUseCompoundFile(mp, useCompoundFile);
+      }
+      IndexWriter writer = new IndexWriter(dir, conf);
+      writer.close();
+      Term searchTerm = new Term("content", "aaa");        
+      Query query = new TermQuery(searchTerm);
+
+      for(int i=0;i<N+1;i++) {
+        if (VERBOSE) {
+          System.out.println("\nTEST: cycle i=" + i);
+        }
+        conf = newIndexWriterConfig(
+            TEST_VERSION_CURRENT, new MockAnalyzer(random))
+          .setOpenMode(OpenMode.APPEND).setIndexDeletionPolicy(policy).setMergePolicy(newLogMergePolicy());
+        mp = conf.getMergePolicy();
+        if (mp instanceof LogMergePolicy) {
+          setUseCompoundFile(mp, useCompoundFile);
+        }
+        writer = new IndexWriter(dir, conf);
+        writer.setInfoStream(VERBOSE ? System.out : null);
+        for(int j=0;j<17;j++) {
+          addDoc(writer);
+        }
+        // this is a commit
+        if (VERBOSE) {
+          System.out.println("TEST: close writer");
+        }
+        writer.close();
+        IndexReader reader = IndexReader.open(dir, policy, false);
+        reader.deleteDocument(3*i+1);
+        reader.setNorm(4*i+1, "content", 2.0F);
+        IndexSearcher searcher = newSearcher(reader);
+        ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
+        assertEquals(16*(1+i), hits.length);
+        // this is a commit
+        if (VERBOSE) {
+          System.out.println("TEST: close reader numOnCommit=" + policy.numOnCommit);
+        }
+        reader.close();
+        searcher.close();
+      }
+      conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
+          .setOpenMode(OpenMode.APPEND).setIndexDeletionPolicy(policy);
+      mp = conf.getMergePolicy();
+      if (mp instanceof LogMergePolicy) {
+        setUseCompoundFile(mp, useCompoundFile);
+      }
+      IndexReader r = IndexReader.open(dir);
+      final boolean wasOptimized = r.isOptimized();
+      r.close();
+      writer = new IndexWriter(dir, conf);
+      writer.optimize();
+      // this is a commit
+      writer.close();
+
+      assertEquals(2*(N+1)+1, policy.numOnInit);
+      assertEquals(2*(N+2) - (wasOptimized ? 1:0), policy.numOnCommit);
+
+      IndexSearcher searcher = new IndexSearcher(dir, false);
+      ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
+      assertEquals(176, hits.length);
+
+      // Simplistic check: just verify only the past N segments_N's still
+      // exist, and, I can open a reader on each:
+      long gen = SegmentInfos.getCurrentSegmentGeneration(dir);
+
+      dir.deleteFile(IndexFileNames.SEGMENTS_GEN);
+      int expectedCount = 176;
+      searcher.close();
+      for(int i=0;i<N+1;i++) {
+        try {
+          IndexReader reader = IndexReader.open(dir, true);
+
+          // Working backwards through the commits, adjust what the
+          // expected hit count should be:
+          searcher = newSearcher(reader);
+          hits = searcher.search(query, null, 1000).scoreDocs;
+          if (i > 1) {
+            if (i % 2 == 0) {
+              expectedCount += 1;
+            } else {
+              expectedCount -= 17;
+            }
+          }
+          assertEquals(expectedCount, hits.length);
+          searcher.close();
+          reader.close();
+          if (i == N) {
+            fail("should have failed on commits before last 5");
+          }
+        } catch (IOException e) {
+          if (i != N) {
+            throw e;
+          }
+        }
+        if (i < N) {
+          dir.deleteFile(IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS, "", gen));
+        }
+        gen--;
+      }
+      dir.close();
+    }
+  }
+
+  /*
+   * Test a deletion policy that keeps last N commits
+   * around, through creates.
+   */
+  public void testKeepLastNDeletionPolicyWithCreates() throws IOException {
+    
+    final int N = 10;
+
+    for(int pass=0;pass<2;pass++) {
+
+      boolean useCompoundFile = (pass % 2) != 0;
+
+      KeepLastNDeletionPolicy policy = new KeepLastNDeletionPolicy(N);
+
+      Directory dir = newDirectory();
+      IndexWriterConfig conf = newIndexWriterConfig(
+          TEST_VERSION_CURRENT, new MockAnalyzer(random))
+          .setOpenMode(OpenMode.CREATE).setIndexDeletionPolicy(policy)
+          .setMaxBufferedDocs(10);
+      MergePolicy mp = conf.getMergePolicy();
+      if (mp instanceof LogMergePolicy) {
+        setUseCompoundFile(mp, useCompoundFile);
+      }
+      IndexWriter writer = new IndexWriter(dir, conf);
+      writer.close();
+      Term searchTerm = new Term("content", "aaa");        
+      Query query = new TermQuery(searchTerm);
+
+      for(int i=0;i<N+1;i++) {
+
+        conf = newIndexWriterConfig(
+            TEST_VERSION_CURRENT, new MockAnalyzer(random))
+            .setOpenMode(OpenMode.APPEND).setIndexDeletionPolicy(policy)
+            .setMaxBufferedDocs(10);
+        mp = conf.getMergePolicy();
+        if (mp instanceof LogMergePolicy) {
+          setUseCompoundFile(mp, useCompoundFile);
+        }
+        writer = new IndexWriter(dir, conf);
+        for(int j=0;j<17;j++) {
+          addDoc(writer);
+        }
+        // this is a commit
+        writer.close();
+        IndexReader reader = IndexReader.open(dir, policy, false);
+        reader.deleteDocument(3);
+        reader.setNorm(5, "content", 2.0F);
+        IndexSearcher searcher = newSearcher(reader);
+        ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
+        assertEquals(16, hits.length);
+        // this is a commit
+        reader.close();
+        searcher.close();
+
+        writer = new IndexWriter(dir, newIndexWriterConfig(
+            TEST_VERSION_CURRENT, new MockAnalyzer(random))
+            .setOpenMode(OpenMode.CREATE).setIndexDeletionPolicy(policy));
+        // This will not commit: there are no changes
+        // pending because we opened for "create":
+        writer.close();
+      }
+
+      assertEquals(3*(N+1), policy.numOnInit);
+      assertEquals(3*(N+1)+1, policy.numOnCommit);
+
+      IndexSearcher searcher = new IndexSearcher(dir, false);
+      ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
+      assertEquals(0, hits.length);
+
+      // Simplistic check: just verify only the past N segments_N's still
+      // exist, and, I can open a reader on each:
+      long gen = SegmentInfos.getCurrentSegmentGeneration(dir);
+
+      dir.deleteFile(IndexFileNames.SEGMENTS_GEN);
+      int expectedCount = 0;
+
+      for(int i=0;i<N+1;i++) {
+        try {
+          IndexReader reader = IndexReader.open(dir, true);
+
+          // Working backwards through the commits, adjust what the
+          // expected hit count should be:
+          searcher = newSearcher(reader);
+          hits = searcher.search(query, null, 1000).scoreDocs;
+          assertEquals(expectedCount, hits.length);
+          searcher.close();
+          if (expectedCount == 0) {
+            expectedCount = 16;
+          } else if (expectedCount == 16) {
+            expectedCount = 17;
+          } else if (expectedCount == 17) {
+            expectedCount = 0;
+          }
+          reader.close();
+          if (i == N) {
+            fail("should have failed on commits before last " + N);
+          }
+        } catch (IOException e) {
+          if (i != N) {
+            throw e;
+          }
+        }
+        if (i < N) {
+          dir.deleteFile(IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS, "", gen));
+        }
+        gen--;
+      }
+      
+      dir.close();
+    }
+  }
+
+  private void addDoc(IndexWriter writer) throws IOException
+  {
+    Document doc = new Document();
+    doc.add(newField("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
+    writer.addDocument(doc);
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestDirectoryReader.java b/lucene/backwards/src/test/org/apache/lucene/index/TestDirectoryReader.java
new file mode 100644
index 0000000..4e4f249
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestDirectoryReader.java
@@ -0,0 +1,215 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.util.LuceneTestCase;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.store.Directory;
+
+import java.io.IOException;
+import java.util.Random;
+
+public class TestDirectoryReader extends LuceneTestCase {
+  protected Directory dir;
+  private Document doc1;
+  private Document doc2;
+  protected SegmentReader [] readers = new SegmentReader[2];
+  protected SegmentInfos sis;
+
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    dir = newDirectory();
+    doc1 = new Document();
+    doc2 = new Document();
+    DocHelper.setupDoc(doc1);
+    DocHelper.setupDoc(doc2);
+    DocHelper.writeDoc(random, dir, doc1);
+    DocHelper.writeDoc(random, dir, doc2);
+    sis = new SegmentInfos();
+    sis.read(dir);
+  }
+  
+  @Override
+  public void tearDown() throws Exception {
+    if (readers[0] != null) readers[0].close();
+    if (readers[1] != null) readers[1].close();
+    dir.close();
+    super.tearDown();
+  }
+
+  protected IndexReader openReader() throws IOException {
+    IndexReader reader;
+    reader = IndexReader.open(dir, false);
+    assertTrue(reader instanceof DirectoryReader);
+
+    assertTrue(dir != null);
+    assertTrue(sis != null);
+    assertTrue(reader != null);
+    
+    return reader;
+  }
+
+  public void test() throws Exception {
+    doTestDocument();
+    doTestUndeleteAll();
+  }    
+
+  public void doTestDocument() throws IOException {
+    sis.read(dir);
+    IndexReader reader = openReader();
+    assertTrue(reader != null);
+    Document newDoc1 = reader.document(0);
+    assertTrue(newDoc1 != null);
+    assertTrue(DocHelper.numFields(newDoc1) == DocHelper.numFields(doc1) - DocHelper.unstored.size());
+    Document newDoc2 = reader.document(1);
+    assertTrue(newDoc2 != null);
+    assertTrue(DocHelper.numFields(newDoc2) == DocHelper.numFields(doc2) - DocHelper.unstored.size());
+    TermFreqVector vector = reader.getTermFreqVector(0, DocHelper.TEXT_FIELD_2_KEY);
+    assertTrue(vector != null);
+    TestSegmentReader.checkNorms(reader);
+    reader.close();
+  }
+
+  public void doTestUndeleteAll() throws IOException {
+    sis.read(dir);
+    IndexReader reader = openReader();
+    assertTrue(reader != null);
+    assertEquals( 2, reader.numDocs() );
+    reader.deleteDocument(0);
+    assertEquals( 1, reader.numDocs() );
+    reader.undeleteAll();
+    assertEquals( 2, reader.numDocs() );
+
+    // Ensure undeleteAll survives commit/close/reopen:
+    reader.commit();
+    reader.close();
+
+    if (reader instanceof MultiReader)
+      // MultiReader does not "own" the directory so it does
+      // not write the changes to sis on commit:
+      sis.commit(dir);
+
+    sis.read(dir);
+    reader = openReader();
+    assertEquals( 2, reader.numDocs() );
+
+    reader.deleteDocument(0);
+    assertEquals( 1, reader.numDocs() );
+    reader.commit();
+    reader.close();
+    if (reader instanceof MultiReader)
+      // MultiReader does not "own" the directory so it does
+      // not write the changes to sis on commit:
+      sis.commit(dir);
+    sis.read(dir);
+    reader = openReader();
+    assertEquals( 1, reader.numDocs() );
+    reader.close();
+  }
+        
+  public void testIsCurrent() throws IOException {
+    Directory ramDir1=newDirectory();
+    addDoc(random, ramDir1, "test foo", true);
+    Directory ramDir2=newDirectory();
+    addDoc(random, ramDir2, "test blah", true);
+    IndexReader[] readers = new IndexReader[]{IndexReader.open(ramDir1, false), IndexReader.open(ramDir2, false)};
+    MultiReader mr = new MultiReader(readers);
+    assertTrue(mr.isCurrent());   // just opened, must be current
+    addDoc(random, ramDir1, "more text", false);
+    assertFalse(mr.isCurrent());   // has been modified, not current anymore
+    addDoc(random, ramDir2, "even more text", false);
+    assertFalse(mr.isCurrent());   // has been modified even more, not current anymore
+    try {
+      mr.getVersion();
+      fail();
+    } catch (UnsupportedOperationException e) {
+      // expected exception
+    }
+    mr.close();
+    ramDir1.close();
+    ramDir2.close();
+  }
+
+  public void testMultiTermDocs() throws IOException {
+    Directory ramDir1=newDirectory();
+    addDoc(random, ramDir1, "test foo", true);
+    Directory ramDir2=newDirectory();
+    addDoc(random, ramDir2, "test blah", true);
+    Directory ramDir3=newDirectory();
+    addDoc(random, ramDir3, "test wow", true);
+
+    IndexReader[] readers1 = new IndexReader[]{IndexReader.open(ramDir1, false), IndexReader.open(ramDir3, false)};
+    IndexReader[] readers2 = new IndexReader[]{IndexReader.open(ramDir1, false), IndexReader.open(ramDir2, false), IndexReader.open(ramDir3, false)};
+    MultiReader mr2 = new MultiReader(readers1);
+    MultiReader mr3 = new MultiReader(readers2);
+
+    // test mixing up TermDocs and TermEnums from different readers.
+    TermDocs td2 = mr2.termDocs();
+    TermEnum te3 = mr3.terms(new Term("body","wow"));
+    td2.seek(te3);
+    int ret = 0;
+
+    // This should blow up if we forget to check that the TermEnum is from the same
+    // reader as the TermDocs.
+    while (td2.next()) ret += td2.doc();
+    td2.close();
+    te3.close();
+
+    // really a dummy assert to ensure that we got some docs and to ensure that
+    // nothing is optimized out.
+    assertTrue(ret > 0);
+    readers1[0].close();
+    readers1[1].close();
+    readers2[0].close();
+    readers2[1].close();
+    readers2[2].close();
+    ramDir1.close();
+    ramDir2.close();
+    ramDir3.close();
+  }
+
+  public void testAllTermDocs() throws IOException {
+    IndexReader reader = openReader();
+    int NUM_DOCS = 2;
+    TermDocs td = reader.termDocs(null);
+    for(int i=0;i<NUM_DOCS;i++) {
+      assertTrue(td.next());
+      assertEquals(i, td.doc());
+      assertEquals(1, td.freq());
+    }
+    td.close();
+    reader.close();
+  }
+
+  private void addDoc(Random random, Directory ramDir1, String s, boolean create) throws IOException {
+    IndexWriter iw = new IndexWriter(ramDir1, newIndexWriterConfig( 
+        TEST_VERSION_CURRENT, 
+        new MockAnalyzer(random)).setOpenMode(
+        create ? OpenMode.CREATE : OpenMode.APPEND));
+    Document doc = new Document();
+    doc.add(newField("body", s, Field.Store.YES, Field.Index.ANALYZED));
+    iw.addDocument(doc);
+    iw.close();
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestDoc.java b/lucene/backwards/src/test/org/apache/lucene/index/TestDoc.java
new file mode 100644
index 0000000..e088f20
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestDoc.java
@@ -0,0 +1,247 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import java.io.File;
+import java.io.FileReader;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.io.StringWriter;
+
+import java.util.LinkedList;
+import java.util.Collection;
+
+import junit.framework.TestSuite;
+import junit.textui.TestRunner;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util._TestUtil;
+
+/** JUnit adaptation of an older test case DocTest. */
+public class TestDoc extends LuceneTestCase {
+
+    /** Main for running test case by itself. */
+    public static void main(String args[]) {
+        TestRunner.run (new TestSuite(TestDoc.class));
+    }
+
+    private File workDir;
+    private File indexDir;
+    private LinkedList<File> files;
+
+    /** Set up the test case. This test case needs
+     *  a few text files created in the current working directory.
+     */
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+        workDir = _TestUtil.getTempDir("TestDoc");
+        workDir.mkdirs();
+
+        indexDir = _TestUtil.getTempDir("testIndex");
+        indexDir.mkdirs();
+
+        Directory directory = newFSDirectory(indexDir);
+        directory.close();
+
+        files = new LinkedList<File>();
+        files.add(createOutput("test.txt",
+            "This is the first test file"
+        ));
+
+        files.add(createOutput("test2.txt",
+            "This is the second test file"
+        ));
+    }
+
+    private File createOutput(String name, String text) throws IOException {
+        FileWriter fw = null;
+        PrintWriter pw = null;
+
+        try {
+            File f = new File(workDir, name);
+            if (f.exists()) f.delete();
+
+            fw = new FileWriter(f);
+            pw = new PrintWriter(fw);
+            pw.println(text);
+            return f;
+
+        } finally {
+            if (pw != null) pw.close();
+            if (fw != null) fw.close();
+        }
+    }
+
+
+    /** This test executes a number of merges and compares the contents of
+     *  the segments created when using compound file or not using one.
+     *
+     *  TODO: the original test used to print the segment contents to System.out
+     *        for visual validation. To have the same effect, a new method
+     *        checkSegment(String name, ...) should be created that would
+     *        assert various things about the segment.
+     */
+    public void testIndexAndMerge() throws Exception {
+      StringWriter sw = new StringWriter();
+      PrintWriter out = new PrintWriter(sw, true);
+      
+      Directory directory = newFSDirectory(indexDir);
+      IndexWriter writer = new IndexWriter(
+          directory,
+          newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+              setOpenMode(OpenMode.CREATE).
+              setMaxBufferedDocs(-1).
+              setMergePolicy(newLogMergePolicy(10))
+      );
+
+      SegmentInfo si1 = indexDoc(writer, "test.txt");
+      printSegment(out, si1);
+
+      SegmentInfo si2 = indexDoc(writer, "test2.txt");
+      printSegment(out, si2);
+      writer.close();
+
+      SegmentInfo siMerge = merge(si1, si2, "merge", false);
+      printSegment(out, siMerge);
+
+      SegmentInfo siMerge2 = merge(si1, si2, "merge2", false);
+      printSegment(out, siMerge2);
+
+      SegmentInfo siMerge3 = merge(siMerge, siMerge2, "merge3", false);
+      printSegment(out, siMerge3);
+      
+      directory.close();
+      out.close();
+      sw.close();
+      String multiFileOutput = sw.getBuffer().toString();
+      //System.out.println(multiFileOutput);
+
+      sw = new StringWriter();
+      out = new PrintWriter(sw, true);
+
+      directory = newFSDirectory(indexDir);
+      writer = new IndexWriter(
+          directory,
+          newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+              setOpenMode(OpenMode.CREATE).
+              setMaxBufferedDocs(-1).
+              setMergePolicy(newLogMergePolicy(10))
+      );
+
+      si1 = indexDoc(writer, "test.txt");
+      printSegment(out, si1);
+
+      si2 = indexDoc(writer, "test2.txt");
+      printSegment(out, si2);
+      writer.close();
+
+      siMerge = merge(si1, si2, "merge", true);
+      printSegment(out, siMerge);
+
+      siMerge2 = merge(si1, si2, "merge2", true);
+      printSegment(out, siMerge2);
+
+      siMerge3 = merge(siMerge, siMerge2, "merge3", true);
+      printSegment(out, siMerge3);
+      
+      directory.close();
+      out.close();
+      sw.close();
+      String singleFileOutput = sw.getBuffer().toString();
+
+      assertEquals(multiFileOutput, singleFileOutput);
+   }
+
+   private SegmentInfo indexDoc(IndexWriter writer, String fileName)
+   throws Exception
+   {
+      File file = new File(workDir, fileName);
+      Document doc = new Document();
+      doc.add(new Field("contents", new FileReader(file)));
+      writer.addDocument(doc);
+      writer.commit();
+      return writer.newestSegment();
+   }
+
+
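+   /** Merges the two given segments into a new segment named
+    *  <code>merged</code> by driving SegmentMerger directly,
+    *  optionally packing the result into a compound file, and
+    *  returns the resulting SegmentInfo. */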
+   private SegmentInfo merge(SegmentInfo si1, SegmentInfo si2, String merged, boolean useCompoundFile)
+   throws Exception {
+      SegmentReader r1 = SegmentReader.get(true, si1, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
+      SegmentReader r2 = SegmentReader.get(true, si2, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
+
+      SegmentMerger merger = new SegmentMerger(si1.dir, IndexWriterConfig.DEFAULT_TERM_INDEX_INTERVAL, merged, null, null, new FieldInfos());
+
+      merger.add(r1);
+      merger.add(r2);
+      merger.merge();
+      r1.close();
+      r2.close();
+      
+      final SegmentInfo info = new SegmentInfo(merged, si1.docCount + si2.docCount, si1.dir,
+                                               false, true,
+                                               merger.fieldInfos().hasProx(),
+                                               merger.fieldInfos().hasVectors());
+      
+      if (useCompoundFile) {
+        Collection<String> filesToDelete = merger.createCompoundFile(merged + ".cfs", info);
+        info.setUseCompoundFile(true);
+        for (final String fileToDelete : filesToDelete) 
+          si1.dir.deleteFile(fileToDelete);
+      }
+
+      return info;
+   }
+
+
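+   /** Writes every stored document, then every term with its doc
+    *  freq and per-document positions, to the given PrintWriter so
+    *  two index builds can be compared textually. */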
+   private void printSegment(PrintWriter out, SegmentInfo si)
+   throws Exception {
+      SegmentReader reader = SegmentReader.get(true, si, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
+
+      for (int i = 0; i < reader.numDocs(); i++)
+        out.println(reader.document(i));
+
+      TermEnum tis = reader.terms();
+      while (tis.next()) {
+        out.print(tis.term());
+        out.println(" DF=" + tis.docFreq());
+
+        TermPositions positions = reader.termPositions(tis.term());
+        try {
+          while (positions.next()) {
+            out.print(" doc=" + positions.doc());
+            out.print(" TF=" + positions.freq());
+            out.print(" pos=");
+            out.print(positions.nextPosition());
+            for (int j = 1; j < positions.freq(); j++)
+              out.print("," + positions.nextPosition());
+            out.println("");
+          }
+        } finally {
+          positions.close();
+        }
+      }
+      tis.close();
+      reader.close();
+    }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestDocumentWriter.java b/lucene/backwards/src/test/org/apache/lucene/index/TestDocumentWriter.java
new file mode 100644
index 0000000..0d7abb0
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestDocumentWriter.java
@@ -0,0 +1,333 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.io.Reader;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.TokenFilter;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.WhitespaceTokenizer;
+import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
+import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.Fieldable;
+import org.apache.lucene.document.Field.Index;
+import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.document.Field.TermVector;
+import org.apache.lucene.index.FieldInfo.IndexOptions;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.AttributeSource;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util._TestUtil;
+
+public class TestDocumentWriter extends LuceneTestCase {
+  private Directory dir;
+
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    dir = newDirectory();
+  }
+  
+  @Override
+  public void tearDown() throws Exception {
+    dir.close();
+    super.tearDown();
+  }
+
+  public void test() {
+    assertTrue(dir != null);
+  }
+
+  public void testAddDocument() throws Exception {
+    Document testDoc = new Document();
+    DocHelper.setupDoc(testDoc);
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    writer.addDocument(testDoc);
+    writer.commit();
+    SegmentInfo info = writer.newestSegment();
+    writer.close();
+    // After adding the document, we should be able to read it back in
+    SegmentReader reader = SegmentReader.get(true, info, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
+    assertTrue(reader != null);
+    Document doc = reader.document(0);
+    assertTrue(doc != null);
+
+    //System.out.println("Document: " + doc);
+    Fieldable [] fields = doc.getFields("textField2");
+    assertTrue(fields != null && fields.length == 1);
+    assertTrue(fields[0].stringValue().equals(DocHelper.FIELD_2_TEXT));
+    assertTrue(fields[0].isTermVectorStored());
+
+    fields = doc.getFields("textField1");
+    assertTrue(fields != null && fields.length == 1);
+    assertTrue(fields[0].stringValue().equals(DocHelper.FIELD_1_TEXT));
+    assertFalse(fields[0].isTermVectorStored());
+
+    fields = doc.getFields("keyField");
+    assertTrue(fields != null && fields.length == 1);
+    assertTrue(fields[0].stringValue().equals(DocHelper.KEYWORD_TEXT));
+
+    fields = doc.getFields(DocHelper.NO_NORMS_KEY);
+    assertTrue(fields != null && fields.length == 1);
+    assertTrue(fields[0].stringValue().equals(DocHelper.NO_NORMS_TEXT));
+
+    fields = doc.getFields(DocHelper.TEXT_FIELD_3_KEY);
+    assertTrue(fields != null && fields.length == 1);
+    assertTrue(fields[0].stringValue().equals(DocHelper.FIELD_3_TEXT));
+
+    // test that the norms are not present in the segment if
+    // omitNorms is true
+    for (int i = 0; i < reader.core.fieldInfos.size(); i++) {
+      FieldInfo fi = reader.core.fieldInfos.fieldInfo(i);
+      if (fi.isIndexed) {
+        assertTrue(fi.omitNorms == !reader.hasNorms(fi.name));
+      }
+    }
+    reader.close();
+  }
+
+  public void testPositionIncrementGap() throws IOException {
+    Analyzer analyzer = new Analyzer() {
+      @Override
+      public TokenStream tokenStream(String fieldName, Reader reader) {
+        return new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader);
+      }
+
+      @Override
+      public int getPositionIncrementGap(String fieldName) {
+        return 500;
+      }
+    };
+
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
+
+    Document doc = new Document();
+    doc.add(newField("repeated", "repeated one", Field.Store.YES, Field.Index.ANALYZED));
+    doc.add(newField("repeated", "repeated two", Field.Store.YES, Field.Index.ANALYZED));
+
+    writer.addDocument(doc);
+    writer.commit();
+    SegmentInfo info = writer.newestSegment();
+    writer.close();
+    SegmentReader reader = SegmentReader.get(true, info, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
+
+    TermPositions termPositions = reader.termPositions(new Term("repeated", "repeated"));
+    assertTrue(termPositions.next());
+    int freq = termPositions.freq();
+    assertEquals(2, freq);
+    assertEquals(0, termPositions.nextPosition());
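+    // second value's "repeated" sits at 1 (last position of the first value) + 500 (gap) + 1 (increment) = 502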
+    assertEquals(502, termPositions.nextPosition());
+    reader.close();
+  }
+
+  public void testTokenReuse() throws IOException {
+    Analyzer analyzer = new Analyzer() {
+      @Override
+      public TokenStream tokenStream(String fieldName, Reader reader) {
+        return new TokenFilter(new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader)) {
+          boolean first = true;
+          AttributeSource.State state;
+
+          @Override
+          public boolean incrementToken() throws IOException {
+            if (state != null) {
+              restoreState(state);
+              payloadAtt.setPayload(null);
+              posIncrAtt.setPositionIncrement(0);
+              termAtt.setEmpty().append("b");
+              state = null;
+              return true;
+            }
+
+            boolean hasNext = input.incrementToken();
+            if (!hasNext) return false;
+            if (Character.isDigit(termAtt.buffer()[0])) {
+              posIncrAtt.setPositionIncrement(termAtt.buffer()[0] - '0');
+            }
+            if (first) {
+              // set payload on first position only
+              payloadAtt.setPayload(new Payload(new byte[]{100}));
+              first = false;
+            }
+
+            // index a "synonym" for every token
+            state = captureState();
+            return true;
+
+          }
+
+          @Override
+          public void reset() throws IOException {
+            super.reset();
+            first = true;
+            state = null;
+          }
+
+          final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
+          final PayloadAttribute payloadAtt = addAttribute(PayloadAttribute.class);
+          final PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class);
+        };
+      }
+    };
+
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
+
+    Document doc = new Document();
+    doc.add(newField("f1", "a 5 a a", Field.Store.YES, Field.Index.ANALYZED));
+
+    writer.addDocument(doc);
+    writer.commit();
+    SegmentInfo info = writer.newestSegment();
+    writer.close();
+    SegmentReader reader = SegmentReader.get(true, info, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
+
+    TermPositions termPositions = reader.termPositions(new Term("f1", "a"));
+    assertTrue(termPositions.next());
+    int freq = termPositions.freq();
+    assertEquals(3, freq);
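+    // the digit token "5" sets a position increment of 5, so "a" lands at positions 0, 6 and 7; only position 0 carries the payload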
+    assertEquals(0, termPositions.nextPosition());
+    assertEquals(true, termPositions.isPayloadAvailable());
+    assertEquals(6, termPositions.nextPosition());
+    assertEquals(false, termPositions.isPayloadAvailable());
+    assertEquals(7, termPositions.nextPosition());
+    assertEquals(false, termPositions.isPayloadAvailable());
+    reader.close();
+  }
+
+
+  public void testPreAnalyzedField() throws IOException {
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
+        TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    Document doc = new Document();
+    
+    doc.add(new Field("preanalyzed", new TokenStream() {
+      private String[] tokens = new String[] {"term1", "term2", "term3", "term2"};
+      private int index = 0;
+      
+      private CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
+      
+      @Override
+      public boolean incrementToken() throws IOException {
+        if (index == tokens.length) {
+          return false;
+        } else {
+          clearAttributes();
+          termAtt.setEmpty().append(tokens[index++]);
+          return true;
+        }        
+      }
+      
+    }, TermVector.NO));
+    
+    writer.addDocument(doc);
+    writer.commit();
+    SegmentInfo info = writer.newestSegment();
+    writer.close();
+    SegmentReader reader = SegmentReader.get(true, info, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
+
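+    // the four pre-analyzed tokens get consecutive positions 0-3, so "term2" occurs twice, at positions 1 and 3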
+    TermPositions termPositions = reader.termPositions(new Term("preanalyzed", "term1"));
+    assertTrue(termPositions.next());
+    assertEquals(1, termPositions.freq());
+    assertEquals(0, termPositions.nextPosition());
+
+    termPositions.seek(new Term("preanalyzed", "term2"));
+    assertTrue(termPositions.next());
+    assertEquals(2, termPositions.freq());
+    assertEquals(1, termPositions.nextPosition());
+    assertEquals(3, termPositions.nextPosition());
+    
+    termPositions.seek(new Term("preanalyzed", "term3"));
+    assertTrue(termPositions.next());
+    assertEquals(1, termPositions.freq());
+    assertEquals(2, termPositions.nextPosition());
+    reader.close();
+  }
+
+  /**
+   * Test adding two fields with the same name, but 
+   * with different term vector settings (LUCENE-766).
+   */
+  public void testMixedTermVectorSettingsSameField() throws Exception {
+    Document doc = new Document();
+    // f1 first without tv then with tv
+    doc.add(newField("f1", "v1", Store.YES, Index.NOT_ANALYZED, TermVector.NO));
+    doc.add(newField("f1", "v2", Store.YES, Index.NOT_ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
+    // f2 first with tv then without tv
+    doc.add(newField("f2", "v1", Store.YES, Index.NOT_ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
+    doc.add(newField("f2", "v2", Store.YES, Index.NOT_ANALYZED, TermVector.NO));
+
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
+        TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    writer.addDocument(doc);
+    writer.close();
+
+    _TestUtil.checkIndex(dir);
+
+    IndexReader reader = IndexReader.open(dir, true);
+    // f1
+    TermFreqVector tfv1 = reader.getTermFreqVector(0, "f1");
+    assertNotNull(tfv1);
+    assertEquals("the 'with_tv' setting should rule!",2,tfv1.getTerms().length);
+    // f2
+    TermFreqVector tfv2 = reader.getTermFreqVector(0, "f2");
+    assertNotNull(tfv2);
+    assertEquals("the 'with_tv' setting should rule!",2,tfv2.getTerms().length);
+    reader.close();
+  }
+
+  /**
+   * Test adding two fields with the same name, one indexed
+   * and the other stored only. The omitNorms and omitTermFreqAndPositions settings
+   * of the stored field should not affect the indexed one (LUCENE-1590).
+   */
+  public void testLUCENE_1590() throws Exception {
+    Document doc = new Document();
+    // f1 has no norms
+    doc.add(newField("f1", "v1", Store.NO, Index.ANALYZED_NO_NORMS));
+    doc.add(newField("f1", "v2", Store.YES, Index.NO));
+    // f2 has no TF
+    Field f = newField("f2", "v1", Store.NO, Index.ANALYZED);
+    f.setIndexOptions(IndexOptions.DOCS_ONLY);
+    doc.add(f);
+    doc.add(newField("f2", "v2", Store.YES, Index.NO));
+
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
+        TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    writer.addDocument(doc);
+    writer.optimize(); // be sure to have a single segment
+    writer.close();
+
+    _TestUtil.checkIndex(dir);
+
+    SegmentReader reader = SegmentReader.getOnlySegmentReader(dir);
+    FieldInfos fi = reader.fieldInfos();
+    // f1
+    assertFalse("f1 should have no norms", reader.hasNorms("f1"));
+    assertEquals("omitTermFreqAndPositions field bit should not be set for f1", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, fi.fieldInfo("f1").indexOptions);
+    // f2
+    assertTrue("f2 should have norms", reader.hasNorms("f2"));
+    assertEquals("omitTermFreqAndPositions field bit should be set for f2", IndexOptions.DOCS_ONLY, fi.fieldInfo("f2").indexOptions);
+    reader.close();
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestFieldInfos.java b/lucene/backwards/src/test/org/apache/lucene/index/TestFieldInfos.java
new file mode 100644
index 0000000..63421ba
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestFieldInfos.java
@@ -0,0 +1,80 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.index.FieldInfo.IndexOptions;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.IndexOutput;
+
+import java.io.IOException;
+
+//import org.cnlp.utils.properties.ResourceBundleHelper;
+
+public class TestFieldInfos extends LuceneTestCase {
+
+  private Document testDoc = new Document();
+
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    DocHelper.setupDoc(testDoc);
+  }
+
+  public void test() throws IOException {
+    //Positive test of FieldInfos
+    assertTrue(testDoc != null);
+    FieldInfos fieldInfos = new FieldInfos();
+    fieldInfos.add(testDoc);
+    //Since the complement is stored as well in the fields map
+    assertTrue(fieldInfos.size() == DocHelper.all.size()); //this is all b/c we are using the no-arg constructor
+    Directory dir = newDirectory();
+    String name = "testFile";
+    IndexOutput output = dir.createOutput(name);
+    assertTrue(output != null);
+    // Write the FieldInfos out, then read them back in and verify each field
+    
+    fieldInfos.write(output);
+    output.close();
+    assertTrue(dir.fileLength(name) > 0);
+    FieldInfos readIn = new FieldInfos(dir, name);
+    assertTrue(fieldInfos.size() == readIn.size());
+    FieldInfo info = readIn.fieldInfo("textField1");
+    assertTrue(info != null);
+    assertTrue(info.storeTermVector == false);
+    assertTrue(info.omitNorms == false);
+
+    info = readIn.fieldInfo("textField2");
+    assertTrue(info != null);
+    assertTrue(info.storeTermVector == true);
+    assertTrue(info.omitNorms == false);
+
+    info = readIn.fieldInfo("textField3");
+    assertTrue(info != null);
+    assertTrue(info.storeTermVector == false);
+    assertTrue(info.omitNorms == true);
+
+    info = readIn.fieldInfo("omitNorms");
+    assertTrue(info != null);
+    assertTrue(info.storeTermVector == false);
+    assertTrue(info.omitNorms == true);
+
+    dir.close();
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestFieldsReader.java b/lucene/backwards/src/test/org/apache/lucene/index/TestFieldsReader.java
new file mode 100644
index 0000000..fdbfc3b
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestFieldsReader.java
@@ -0,0 +1,591 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.NumericField;
+import org.apache.lucene.document.FieldSelector;
+import org.apache.lucene.document.FieldSelectorResult;
+import org.apache.lucene.document.Fieldable;
+import org.apache.lucene.document.LoadFirstFieldSelector;
+import org.apache.lucene.document.SetBasedFieldSelector;
+import org.apache.lucene.index.FieldInfo.IndexOptions;
+import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.search.FieldCache;
+import org.apache.lucene.store.AlreadyClosedException;
+import org.apache.lucene.store.BufferedIndexInput;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.store.IndexOutput;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util._TestUtil;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+
+public class TestFieldsReader extends LuceneTestCase {
+  private static Directory dir;
+  private static Document testDoc = new Document();
+  private static FieldInfos fieldInfos = null;
+  private final static String TEST_SEGMENT_NAME = "_0";
+
+  @BeforeClass
+  public static void beforeClass() throws Exception {
+    fieldInfos = new FieldInfos();
+    DocHelper.setupDoc(testDoc);
+    fieldInfos.add(testDoc);
+    dir = newDirectory();
+    IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy());
+    ((LogMergePolicy) conf.getMergePolicy()).setUseCompoundFile(false);
+    IndexWriter writer = new IndexWriter(dir, conf);
+    writer.addDocument(testDoc);
+    writer.close();
+  }
+  
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    FaultyIndexInput.doFail = false;
+  }
+
+  @AfterClass
+  public static void afterClass() throws Exception {
+    dir.close();
+    dir = null;
+    fieldInfos = null;
+    testDoc = null;
+  }
+  public void test() throws IOException {
+    assertTrue(dir != null);
+    assertTrue(fieldInfos != null);
+    FieldsReader reader = new FieldsReader(dir, TEST_SEGMENT_NAME, fieldInfos);
+    assertTrue(reader.size() == 1);
+    Document doc = reader.doc(0, null);
+    assertTrue(doc != null);
+    assertTrue(doc.getField(DocHelper.TEXT_FIELD_1_KEY) != null);
+
+    Fieldable field = doc.getField(DocHelper.TEXT_FIELD_2_KEY);
+    assertTrue(field != null);
+    assertTrue(field.isTermVectorStored() == true);
+
+    assertTrue(field.isStoreOffsetWithTermVector() == true);
+    assertTrue(field.isStorePositionWithTermVector() == true);
+    assertTrue(field.getOmitNorms() == false);
+    assertTrue(field.getIndexOptions() == IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
+
+    field = doc.getField(DocHelper.TEXT_FIELD_3_KEY);
+    assertTrue(field != null);
+    assertTrue(field.isTermVectorStored() == false);
+    assertTrue(field.isStoreOffsetWithTermVector() == false);
+    assertTrue(field.isStorePositionWithTermVector() == false);
+    assertTrue(field.getOmitNorms() == true);
+    assertTrue(field.getIndexOptions() == IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
+
+    field = doc.getField(DocHelper.NO_TF_KEY);
+    assertTrue(field != null);
+    assertTrue(field.isTermVectorStored() == false);
+    assertTrue(field.isStoreOffsetWithTermVector() == false);
+    assertTrue(field.isStorePositionWithTermVector() == false);
+    assertTrue(field.getOmitNorms() == false);
+    assertTrue(field.getIndexOptions() == IndexOptions.DOCS_ONLY);
+    reader.close();
+  }
+
+
+  public void testLazyFields() throws Exception {
+    assertTrue(dir != null);
+    assertTrue(fieldInfos != null);
+    FieldsReader reader = new FieldsReader(dir, TEST_SEGMENT_NAME, fieldInfos);
+    assertTrue(reader.size() == 1);
+    Set<String> loadFieldNames = new HashSet<String>();
+    loadFieldNames.add(DocHelper.TEXT_FIELD_1_KEY);
+    loadFieldNames.add(DocHelper.TEXT_FIELD_UTF1_KEY);
+    Set<String> lazyFieldNames = new HashSet<String>();
+    //new String[]{DocHelper.LARGE_LAZY_FIELD_KEY, DocHelper.LAZY_FIELD_KEY, DocHelper.LAZY_FIELD_BINARY_KEY};
+    lazyFieldNames.add(DocHelper.LARGE_LAZY_FIELD_KEY);
+    lazyFieldNames.add(DocHelper.LAZY_FIELD_KEY);
+    lazyFieldNames.add(DocHelper.LAZY_FIELD_BINARY_KEY);
+    lazyFieldNames.add(DocHelper.TEXT_FIELD_UTF2_KEY);
+    SetBasedFieldSelector fieldSelector = new SetBasedFieldSelector(loadFieldNames, lazyFieldNames);
+    Document doc = reader.doc(0, fieldSelector);
+    assertTrue("doc is null and it shouldn't be", doc != null);
+    Fieldable field = doc.getFieldable(DocHelper.LAZY_FIELD_KEY);
+    assertTrue("field is null and it shouldn't be", field != null);
+    assertTrue("field is not lazy and it should be", field.isLazy());
+    String value = field.stringValue();
+    assertTrue("value is null and it shouldn't be", value != null);
+    assertTrue(value + " is not equal to " + DocHelper.LAZY_FIELD_TEXT, value.equals(DocHelper.LAZY_FIELD_TEXT) == true);
+    assertTrue("calling stringValue() twice should give same reference", field.stringValue() == field.stringValue());
+
+    field = doc.getFieldable(DocHelper.TEXT_FIELD_1_KEY);
+    assertTrue("field is null and it shouldn't be", field != null);
+    assertTrue("Field is lazy and it should not be", field.isLazy() == false);
+    field = doc.getFieldable(DocHelper.TEXT_FIELD_UTF1_KEY);
+    assertTrue("field is null and it shouldn't be", field != null);
+    assertTrue("Field is lazy and it should not be", field.isLazy() == false);
+    assertTrue(field.stringValue() + " is not equal to " + DocHelper.FIELD_UTF1_TEXT, field.stringValue().equals(DocHelper.FIELD_UTF1_TEXT) == true);
+
+    field = doc.getFieldable(DocHelper.TEXT_FIELD_UTF2_KEY);
+    assertTrue("field is null and it shouldn't be", field != null);
+    assertTrue("Field is lazy and it should not be", field.isLazy() == true);
+    assertTrue(field.stringValue() + " is not equal to " + DocHelper.FIELD_UTF2_TEXT, field.stringValue().equals(DocHelper.FIELD_UTF2_TEXT) == true);
+
+    field = doc.getFieldable(DocHelper.LAZY_FIELD_BINARY_KEY);
+    assertTrue("field is null and it shouldn't be", field != null);
+    assertTrue("stringValue isn't null for lazy binary field", field.stringValue() == null);
+
+    byte [] bytes = field.getBinaryValue();
+    assertTrue("bytes is null and it shouldn't be", bytes != null);
+    assertTrue("", DocHelper.LAZY_FIELD_BINARY_BYTES.length == bytes.length);
+    assertTrue("calling binaryValue() twice should give same reference", field.getBinaryValue() == field.getBinaryValue());
+    for (int i = 0; i < bytes.length; i++) {
+      assertTrue("byte[" + i + "] is mismatched", bytes[i] == DocHelper.LAZY_FIELD_BINARY_BYTES[i]);
+
+    }
+    reader.close();
+  }
+
+  public void testLatentFields() throws Exception {
+    assertTrue(dir != null);
+    assertTrue(fieldInfos != null);
+    FieldsReader reader = new FieldsReader(dir, TEST_SEGMENT_NAME, fieldInfos);
+    assertTrue(reader.size() == 1);
+    Set<String> loadFieldNames = new HashSet<String>();
+    loadFieldNames.add(DocHelper.TEXT_FIELD_1_KEY);
+    loadFieldNames.add(DocHelper.TEXT_FIELD_UTF1_KEY);
+    Set<String> lazyFieldNames = new HashSet<String>();
+    //new String[]{DocHelper.LARGE_LAZY_FIELD_KEY, DocHelper.LAZY_FIELD_KEY, DocHelper.LAZY_FIELD_BINARY_KEY};
+    lazyFieldNames.add(DocHelper.LARGE_LAZY_FIELD_KEY);
+    lazyFieldNames.add(DocHelper.LAZY_FIELD_KEY);
+    lazyFieldNames.add(DocHelper.LAZY_FIELD_BINARY_KEY);
+    lazyFieldNames.add(DocHelper.TEXT_FIELD_UTF2_KEY);
+
+    // Use LATENT instead of LAZY
+    SetBasedFieldSelector fieldSelector = new SetBasedFieldSelector(loadFieldNames, lazyFieldNames) {
+        @Override
+        public FieldSelectorResult accept(String fieldName) {
+          final FieldSelectorResult result = super.accept(fieldName);
+          if (result == FieldSelectorResult.LAZY_LOAD) {
+            return FieldSelectorResult.LATENT;
+          } else {
+            return result;
+          }
+        }
+      };
+
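+    // LATENT fields are re-read on every access, so repeated stringValue()/getBinaryValue() calls return distinct instances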
+    Document doc = reader.doc(0, fieldSelector);
+    assertTrue("doc is null and it shouldn't be", doc != null);
+    Fieldable field = doc.getFieldable(DocHelper.LAZY_FIELD_KEY);
+    assertTrue("field is null and it shouldn't be", field != null);
+    assertTrue("field is not lazy and it should be", field.isLazy());
+    String value = field.stringValue();
+    assertTrue("value is null and it shouldn't be", value != null);
+    assertTrue(value + " is not equal to " + DocHelper.LAZY_FIELD_TEXT, value.equals(DocHelper.LAZY_FIELD_TEXT) == true);
+    assertTrue("calling stringValue() twice should give different references", field.stringValue() != field.stringValue());
+
+    field = doc.getFieldable(DocHelper.TEXT_FIELD_1_KEY);
+    assertTrue("field is null and it shouldn't be", field != null);
+    assertTrue("Field is lazy and it should not be", field.isLazy() == false);
+    assertTrue("calling stringValue() twice should give same reference", field.stringValue() == field.stringValue());
+
+    field = doc.getFieldable(DocHelper.TEXT_FIELD_UTF1_KEY);
+    assertTrue("field is null and it shouldn't be", field != null);
+    assertTrue("Field is lazy and it should not be", field.isLazy() == false);
+    assertTrue(field.stringValue() + " is not equal to " + DocHelper.FIELD_UTF1_TEXT, field.stringValue().equals(DocHelper.FIELD_UTF1_TEXT) == true);
+    assertTrue("calling stringValue() twice should give same reference", field.stringValue() == field.stringValue());
+
+    field = doc.getFieldable(DocHelper.TEXT_FIELD_UTF2_KEY);
+    assertTrue("field is null and it shouldn't be", field != null);
+    assertTrue("Field is lazy and it should not be", field.isLazy() == true);
+    assertTrue(field.stringValue() + " is not equal to " + DocHelper.FIELD_UTF2_TEXT, field.stringValue().equals(DocHelper.FIELD_UTF2_TEXT) == true);
+    assertTrue("calling stringValue() twice should give different references", field.stringValue() != field.stringValue());
+
+    field = doc.getFieldable(DocHelper.LAZY_FIELD_BINARY_KEY);
+    assertTrue("field is null and it shouldn't be", field != null);
+    assertTrue("stringValue isn't null for lazy binary field", field.stringValue() == null);
+    assertTrue("calling binaryValue() twice should give different references", field.getBinaryValue() != field.getBinaryValue());
+
+    byte [] bytes = field.getBinaryValue();
+    assertTrue("bytes is null and it shouldn't be", bytes != null);
+    assertTrue("", DocHelper.LAZY_FIELD_BINARY_BYTES.length == bytes.length);
+    for (int i = 0; i < bytes.length; i++) {
+      assertTrue("byte[" + i + "] is mismatched", bytes[i] == DocHelper.LAZY_FIELD_BINARY_BYTES[i]);
+
+    }
+    reader.close();
+  }
+
+
+  public void testLazyFieldsAfterClose() throws Exception {
+    assertTrue(dir != null);
+    assertTrue(fieldInfos != null);
+    FieldsReader reader = new FieldsReader(dir, TEST_SEGMENT_NAME, fieldInfos);
+    assertTrue(reader.size() == 1);
+    Set<String> loadFieldNames = new HashSet<String>();
+    loadFieldNames.add(DocHelper.TEXT_FIELD_1_KEY);
+    loadFieldNames.add(DocHelper.TEXT_FIELD_UTF1_KEY);
+    Set<String> lazyFieldNames = new HashSet<String>();
+    lazyFieldNames.add(DocHelper.LARGE_LAZY_FIELD_KEY);
+    lazyFieldNames.add(DocHelper.LAZY_FIELD_KEY);
+    lazyFieldNames.add(DocHelper.LAZY_FIELD_BINARY_KEY);
+    lazyFieldNames.add(DocHelper.TEXT_FIELD_UTF2_KEY);
+    SetBasedFieldSelector fieldSelector = new SetBasedFieldSelector(loadFieldNames, lazyFieldNames);
+    Document doc = reader.doc(0, fieldSelector);
+    assertTrue("doc is null and it shouldn't be", doc != null);
+    Fieldable field = doc.getFieldable(DocHelper.LAZY_FIELD_KEY);
+    assertTrue("field is null and it shouldn't be", field != null);
+    assertTrue("field is not lazy and it should be", field.isLazy());
+    reader.close();
+    try {
+      field.stringValue();
+      fail("did not hit AlreadyClosedException as expected");
+    } catch (AlreadyClosedException e) {
+      // expected
+    }
+  }
+
+  public void testLoadFirst() throws Exception {
+    assertTrue(dir != null);
+    assertTrue(fieldInfos != null);
+    FieldsReader reader = new FieldsReader(dir, TEST_SEGMENT_NAME, fieldInfos);
+    assertTrue(reader.size() == 1);
+    LoadFirstFieldSelector fieldSelector = new LoadFirstFieldSelector();
+    Document doc = reader.doc(0, fieldSelector);
+    assertTrue("doc is null and it shouldn't be", doc != null);
+    int count = 0;
+    List<Fieldable> l = doc.getFields();
+    for (final Fieldable fieldable : l ) {
+      Field field = (Field) fieldable;
+
+      assertTrue("field is null and it shouldn't be", field != null);
+      String sv = field.stringValue();
+      assertTrue("sv is null and it shouldn't be", sv != null);
+      count++;
+    }
+    assertTrue(count + " does not equal: " + 1, count == 1);
+    reader.close();
+  }
+
+  /**
+   * Not really a test per se, but we should have some way of assessing whether this is worthwhile.
+   * <p/>
+   * Must be tested using a File-based directory.
+   *
+   * @throws Exception
+   */
+  public void testLazyPerformance() throws Exception {
+    String userName = System.getProperty("user.name");
+    File file = _TestUtil.getTempDir("lazyDir" + userName);
+    Directory tmpDir = newFSDirectory(file);
+    assertTrue(tmpDir != null);
+
+    IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE).setMergePolicy(newLogMergePolicy());
+    ((LogMergePolicy) conf.getMergePolicy()).setUseCompoundFile(false);
+    IndexWriter writer = new IndexWriter(tmpDir, conf);
+    writer.addDocument(testDoc);
+    writer.close();
+
+    assertTrue(fieldInfos != null);
+    FieldsReader reader;
+    long lazyTime = 0;
+    long regularTime = 0;
+    int length = 10;
+    Set<String> lazyFieldNames = new HashSet<String>();
+    lazyFieldNames.add(DocHelper.LARGE_LAZY_FIELD_KEY);
+    SetBasedFieldSelector fieldSelector = new SetBasedFieldSelector(Collections. <String> emptySet(), lazyFieldNames);
+
+    for (int i = 0; i < length; i++) {
+      reader = new FieldsReader(tmpDir, TEST_SEGMENT_NAME, fieldInfos);
+      assertTrue(reader.size() == 1);
+
+      Document doc;
+      doc = reader.doc(0, null);//Load all of them
+      assertTrue("doc is null and it shouldn't be", doc != null);
+      Fieldable field = doc.getFieldable(DocHelper.LARGE_LAZY_FIELD_KEY);
+      assertTrue("field is null and it shouldn't be", field != null);
+      assertTrue("field is lazy", field.isLazy() == false);
+      String value;
+      long start;
+      long finish;
+      start = System.currentTimeMillis();
+      //On my machine this was always 0ms.
+      value = field.stringValue();
+      finish = System.currentTimeMillis();
+      assertTrue("value is null and it shouldn't be", value != null);
+      regularTime += (finish - start);
+      reader.close();
+      reader = null;
+      doc = null;
+      //Hmmm, are we still in cache???
+      System.gc();
+      reader = new FieldsReader(tmpDir, TEST_SEGMENT_NAME, fieldInfos);
+      doc = reader.doc(0, fieldSelector);
+      field = doc.getFieldable(DocHelper.LARGE_LAZY_FIELD_KEY);
+      assertTrue("field is not lazy", field.isLazy() == true);
+      start = System.currentTimeMillis();
+      //On my machine this took around 50 - 70ms
+      value = field.stringValue();
+      finish = System.currentTimeMillis();
+      assertTrue("value is null and it shouldn't be", value != null);
+      lazyTime += (finish - start);
+      reader.close();
+    }
+    tmpDir.close();
+    
+    if (VERBOSE) {
+      System.out.println("Average Non-lazy time (should be very close to zero): " + regularTime / length + " ms for " + length + " reads");
+      System.out.println("Average Lazy Time (should be greater than zero): " + lazyTime / length + " ms for " + length + " reads");
+    }
+  }
+  
+  public void testLoadSize() throws IOException {
+    FieldsReader reader = new FieldsReader(dir, TEST_SEGMENT_NAME, fieldInfos);
+    Document doc;
+    
+    doc = reader.doc(0, new FieldSelector(){
+      public FieldSelectorResult accept(String fieldName) {
+        if (fieldName.equals(DocHelper.TEXT_FIELD_1_KEY) ||
+            fieldName.equals(DocHelper.LAZY_FIELD_BINARY_KEY))
+          return FieldSelectorResult.SIZE;
+        else if (fieldName.equals(DocHelper.TEXT_FIELD_3_KEY))
+          return FieldSelectorResult.LOAD;
+        else
+          return FieldSelectorResult.NO_LOAD;
+      }
+    });
+    Fieldable f1 = doc.getFieldable(DocHelper.TEXT_FIELD_1_KEY);
+    Fieldable f3 = doc.getFieldable(DocHelper.TEXT_FIELD_3_KEY);
+    Fieldable fb = doc.getFieldable(DocHelper.LAZY_FIELD_BINARY_KEY);
+    assertTrue(f1.isBinary());
+    assertTrue(!f3.isBinary());
+    assertTrue(fb.isBinary());
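+    // SIZE fields come back as a 4-byte binary size; string field sizes are counted at 2 bytes per character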
+    assertSizeEquals(2*DocHelper.FIELD_1_TEXT.length(), f1.getBinaryValue());
+    assertEquals(DocHelper.FIELD_3_TEXT, f3.stringValue());
+    assertSizeEquals(DocHelper.LAZY_FIELD_BINARY_BYTES.length, fb.getBinaryValue());
+    
+    reader.close();
+  }
+  
+  private void assertSizeEquals(int size, byte[] sizebytes) {
+    assertEquals((byte) (size>>>24), sizebytes[0]);
+    assertEquals((byte) (size>>>16), sizebytes[1]);
+    assertEquals((byte) (size>>> 8), sizebytes[2]);
+    assertEquals((byte)  size      , sizebytes[3]);
+  }
+
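+  // Directory wrapper whose inputs can simulate intermittent read failures (toggled via FaultyIndexInput.doFail)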
+  public static class FaultyFSDirectory extends Directory {
+
+    Directory fsDir;
+    public FaultyFSDirectory(File dir) throws IOException {
+      fsDir = newFSDirectory(dir);
+      lockFactory = fsDir.getLockFactory();
+    }
+    @Override
+    public IndexInput openInput(String name) throws IOException {
+      return new FaultyIndexInput(fsDir.openInput(name));
+    }
+    @Override
+    public String[] listAll() throws IOException {
+      return fsDir.listAll();
+    }
+    @Override
+    public boolean fileExists(String name) throws IOException {
+      return fsDir.fileExists(name);
+    }
+    @Override
+    public long fileModified(String name) throws IOException {
+      return fsDir.fileModified(name);
+    }
+    @Override
+    @Deprecated
+    /*  @deprecated Lucene never uses this API; it will be
+     *  removed in 4.0. */
+    public void touchFile(String name) throws IOException {
+      fsDir.touchFile(name);
+    }
+    @Override
+    public void deleteFile(String name) throws IOException {
+      fsDir.deleteFile(name);
+    }
+    @Override
+    public long fileLength(String name) throws IOException {
+      return fsDir.fileLength(name);
+    }
+    @Override
+    public IndexOutput createOutput(String name) throws IOException {
+      return fsDir.createOutput(name);
+    }
+    @Override
+    public void close() throws IOException {
+      fsDir.close();
+    }
+  }
+
+  private static class FaultyIndexInput extends BufferedIndexInput {
+    IndexInput delegate;
+    static boolean doFail;
+    int count;
+    private FaultyIndexInput(IndexInput delegate) {
+      this.delegate = delegate;
+    }
+    private void simOutage() throws IOException {
+      if (doFail && count++ % 2 == 1) {
+        throw new IOException("Simulated network outage");
+      }
+    }
+    @Override
+    public void readInternal(byte[] b, int offset, int length) throws IOException {
+      simOutage();
+      delegate.readBytes(b, offset, length);
+    }
+    @Override
+    public void seekInternal(long pos) throws IOException {
+      //simOutage();
+      delegate.seek(pos);
+    }
+    @Override
+    public long length() {
+      return delegate.length();
+    }
+    @Override
+    public void close() throws IOException {
+      delegate.close();
+    }
+    @Override
+    public Object clone() {
+      return new FaultyIndexInput((IndexInput) delegate.clone());
+    }
+  }
+
+  // LUCENE-1262
+  public void testExceptions() throws Throwable {
+    File indexDir = _TestUtil.getTempDir("testfieldswriterexceptions");
+
+    try {
+      Directory dir = new FaultyFSDirectory(indexDir);
+      IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( 
+          TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE));
+      for(int i=0;i<2;i++)
+        writer.addDocument(testDoc);
+      writer.optimize();
+      writer.close();
+
+      IndexReader reader = IndexReader.open(dir, true);
+
+      FaultyIndexInput.doFail = true;
+
+      boolean exc = false;
+
+      for(int i=0;i<2;i++) {
+        try {
+          reader.document(i);
+        } catch (IOException ioe) {
+          // expected
+          exc = true;
+        }
+        try {
+          reader.document(i);
+        } catch (IOException ioe) {
+          // expected
+          exc = true;
+        }
+      }
+      assertTrue(exc);
+      reader.close();
+      dir.close();
+    } finally {
+      _TestUtil.rmDir(indexDir);
+    }
+
+  }
+  
+  public void testNumericField() throws Exception {
+    Directory dir = newDirectory();
+    RandomIndexWriter w = new RandomIndexWriter(random, dir);
+    final int numDocs = atLeast(500);
+    final Number[] answers = new Number[numDocs];
+    final NumericField.DataType[] typeAnswers = new NumericField.DataType[numDocs];
+    for(int id=0;id<numDocs;id++) {
+      Document doc = new Document();
+      NumericField nf = new NumericField("nf", Field.Store.YES, false);
+      doc.add(nf);
+      final Number answer;
+      final NumericField.DataType typeAnswer;
+      if (random.nextBoolean()) {
+        // float/double
+        if (random.nextBoolean()) {
+          final float f = random.nextFloat();
+          nf.setFloatValue(f);
+          answer = Float.valueOf(f);
+          typeAnswer = NumericField.DataType.FLOAT;
+        } else {
+          final double d = random.nextDouble();
+          nf.setDoubleValue(d);
+          answer = Double.valueOf(d);
+          typeAnswer = NumericField.DataType.DOUBLE;
+        }
+      } else {
+        // int/long
+        if (random.nextBoolean()) {
+          final int i = random.nextInt();
+          nf.setIntValue(i);
+          answer = Integer.valueOf(i);
+          typeAnswer = NumericField.DataType.INT;
+        } else {
+          final long l = random.nextLong();
+          nf.setLongValue(l);
+          answer = Long.valueOf(l);
+          typeAnswer = NumericField.DataType.LONG;
+        }
+      }
+      answers[id] = answer;
+      typeAnswers[id] = typeAnswer;
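+      // also index the id so FieldCache can map each doc back to its expected answer below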
+      doc.add(new NumericField("id", Integer.MAX_VALUE, Field.Store.NO, true).setIntValue(id));
+      w.addDocument(doc);
+    }
+    final IndexReader r = w.getReader();
+    w.close();
+    
+    assertEquals(numDocs, r.numDocs());
+
+    for(IndexReader sub : r.getSequentialSubReaders()) {
+      final int[] ids = FieldCache.DEFAULT.getInts(sub, "id");
+      for(int docID=0;docID<sub.numDocs();docID++) {
+        final Document doc = sub.document(docID);
+        final Fieldable f = doc.getFieldable("nf");
+        assertTrue("got f=" + f, f instanceof NumericField);
+        final NumericField nf = (NumericField) f;
+        assertEquals(answers[ids[docID]], nf.getNumericValue());
+        assertSame(typeAnswers[ids[docID]], nf.getDataType());
+      }
+    }
+    r.close();
+    dir.close();
+  }
+  
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestFilterIndexReader.java b/lucene/backwards/src/test/org/apache/lucene/index/TestFilterIndexReader.java
new file mode 100644
index 0000000..9609689
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestFilterIndexReader.java
@@ -0,0 +1,139 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+import org.apache.lucene.util.LuceneTestCase;
+import junit.framework.TestSuite;
+import junit.textui.TestRunner;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+
+import java.io.IOException;
+
+public class TestFilterIndexReader extends LuceneTestCase {
+
+  private static class TestReader extends FilterIndexReader {
+
+     /** Filter that only permits terms containing 'e'.*/
+    private static class TestTermEnum extends FilterTermEnum {
+      public TestTermEnum(TermEnum termEnum) {
+        super(termEnum);
+      }
+
+      /** Scan for terms containing the letter 'e'.*/
+      @Override
+      public boolean next() throws IOException {
+        while (in.next()) {
+          if (in.term().text().indexOf('e') != -1)
+            return true;
+        }
+        return false;
+      }
+    }
+    
+    /** Filter that only returns odd numbered documents. */
+    private static class TestTermPositions extends FilterTermPositions {
+      public TestTermPositions(TermPositions in) {
+        super(in);
+      }
+
+      /** Scan for odd numbered documents. */
+      @Override
+      public boolean next() throws IOException {
+        while (in.next()) {
+          if ((in.doc() % 2) == 1)
+            return true;
+        }
+        return false;
+      }
+    }
+    
+    public TestReader(IndexReader reader) {
+      super(reader);
+    }
+
+    /** Filter terms with TestTermEnum. */
+    @Override
+    public TermEnum terms() throws IOException {
+      return new TestTermEnum(in.terms());
+    }
+
+    /** Filter positions with TestTermPositions. */
+    @Override
+    public TermPositions termPositions() throws IOException {
+      return new TestTermPositions(in.termPositions());
+    }
+  }
+
+
+  /** Main for running test case by itself. */
+  public static void main(String args[]) {
+    TestRunner.run(new TestSuite(TestFilterIndexReader.class));
+  }
+    
+  /**
+   * Tests that FilterIndexReader passes terms and positions through its filters correctly.
+   * @throws Exception on error
+   */
+  public void testFilterIndexReader() throws Exception {
+    Directory directory = newDirectory();
+    IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+
+    Document d1 = new Document();
+    d1.add(newField("default","one two", Field.Store.YES, Field.Index.ANALYZED));
+    writer.addDocument(d1);
+
+    Document d2 = new Document();
+    d2.add(newField("default","one three", Field.Store.YES, Field.Index.ANALYZED));
+    writer.addDocument(d2);
+
+    Document d3 = new Document();
+    d3.add(newField("default","two four", Field.Store.YES, Field.Index.ANALYZED));
+    writer.addDocument(d3);
+
+    writer.close();
+
+    IndexReader reader = new TestReader(IndexReader.open(directory, true));
+    TermEnum terms = reader.terms();
+    while (terms.next()) {
+      assertTrue(terms.term().text().indexOf('e') != -1);
+    }
+    terms.close();
+    
+    TermPositions positions = reader.termPositions(new Term("default", "one"));
+    while (positions.next()) {
+      assertTrue((positions.doc() % 2) == 1);
+    }
+
+    int NUM_DOCS = 3;
+
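+    // TestReader does not override termDocs(), so the unfiltered enumeration still sees all three documents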
+    TermDocs td = reader.termDocs(null);
+    for(int i=0;i<NUM_DOCS;i++) {
+      assertTrue(td.next());
+      assertEquals(i, td.doc());
+      assertEquals(1, td.freq());
+    }
+    td.close();
+    reader.close();
+    directory.close();
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestIndexCommit.java b/lucene/backwards/src/test/org/apache/lucene/index/TestIndexCommit.java
new file mode 100644
index 0000000..d6482b1
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestIndexCommit.java
@@ -0,0 +1,65 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Map;
+
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+import org.junit.Test;
+
+public class TestIndexCommit extends LuceneTestCase {
+
+  @Test
+  public void testEqualsHashCode() throws Exception {
+    // LUCENE-2417: equals and hashCode() impl was inconsistent
+    final Directory dir = newDirectory();
+    
+    IndexCommit ic1 = new IndexCommit() {
+      @Override public String getSegmentsFileName() { return "a"; }
+      @Override public long getVersion() { return 12; }
+      @Override public Directory getDirectory() { return dir; }
+      @Override public Collection<String> getFileNames() throws IOException { return null; }
+      @Override public void delete() {}
+      @Override public long getGeneration() { return 0; }
+      @Override public long getTimestamp() throws IOException { return 1;}
+      @Override public Map<String, String> getUserData() throws IOException { return null; }
+      @Override public boolean isDeleted() { return false; }
+      @Override public boolean isOptimized() { return false; }
+    };
+    
+    IndexCommit ic2 = new IndexCommit() {
+      @Override public String getSegmentsFileName() { return "b"; }
+      @Override public long getVersion() { return 12; }
+      @Override public Directory getDirectory() { return dir; }
+      @Override public Collection<String> getFileNames() throws IOException { return null; }
+      @Override public void delete() {}
+      @Override public long getGeneration() { return 0; }
+      @Override public long getTimestamp() throws IOException { return 1;}
+      @Override public Map<String, String> getUserData() throws IOException { return null; }
+      @Override public boolean isDeleted() { return false; }
+      @Override public boolean isOptimized() { return false; }
+    };
+
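+    // the two commits differ only in their segments file name, so they must still be equal and share a hash code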
+    assertEquals(ic1, ic2);
+    assertEquals("hash codes are not equals", ic1.hashCode(), ic2.hashCode());
+    dir.close();
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestIndexFileDeleter.java b/lucene/backwards/src/test/org/apache/lucene/index/TestIndexFileDeleter.java
new file mode 100644
index 0000000..2064020
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestIndexFileDeleter.java
@@ -0,0 +1,242 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.store.IndexOutput;
+import org.apache.lucene.store.MockDirectoryWrapper;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.WhitespaceAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+
+import java.io.*;
+import java.util.*;
+
+/*
+  Verify that IndexFileDeleter removes leftover and unreferenced
+  index files when a writer is opened on the directory.
+*/
+
+public class TestIndexFileDeleter extends LuceneTestCase {
+  
+  public void testDeleteLeftoverFiles() throws IOException {
+    MockDirectoryWrapper dir = newDirectory();
+    dir.setPreventDoubleWrite(false);
+    IndexWriterConfig conf = newIndexWriterConfig(
+        TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT))
+        .setMaxBufferedDocs(10);
+    LogMergePolicy mergePolicy = newLogMergePolicy(true, 10);
+    mergePolicy.setNoCFSRatio(1); // This test expects all of its segments to be in CFS
+    conf.setMergePolicy(mergePolicy);
+
+    IndexWriter writer = new IndexWriter(
+        dir,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+            setMaxBufferedDocs(10).
+            setMergePolicy(mergePolicy)
+    );
+
+    writer.setInfoStream(VERBOSE ? System.out : null);
+
+    int i;
+    for(i=0;i<35;i++) {
+      addDoc(writer, i);
+    }
+    mergePolicy.setUseCompoundFile(false);
+    for(;i<45;i++) {
+      addDoc(writer, i);
+    }
+    writer.close();
+
+    // Delete one doc so we get a .del file:
+    IndexReader reader = IndexReader.open(dir, false);
+    Term searchTerm = new Term("id", "7");
+    int delCount = reader.deleteDocuments(searchTerm);
+    assertEquals("didn't delete the right number of documents", 1, delCount);
+
+    // Set one norm so we get a .s0 file:
+    reader.setNorm(21, "content", (float) 1.5);
+    reader.close();
+
+    // Now, artificially create an extra .del file & extra
+    // .s0 file:
+    String[] files = dir.listAll();
+
+    /*
+    for(int j=0;j<files.length;j++) {
+      System.out.println(j + ": " + files[j]);
+    }
+    */
+
+    // The numbering of fields can vary depending on which
+    // JRE is in use.  On some JREs we see content bound to
+    // field 0; on others, field 1.  So, here we have to
+    // figure out which field number corresponds to
+    // "content", and then set our expected file names below
+    // accordingly:
+    CompoundFileReader cfsReader = new CompoundFileReader(dir, "_2.cfs");
+    FieldInfos fieldInfos = new FieldInfos(cfsReader, "_2.fnm");
+    int contentFieldIndex = -1;
+    for(i=0;i<fieldInfos.size();i++) {
+      FieldInfo fi = fieldInfos.fieldInfo(i);
+      if (fi.name.equals("content")) {
+        contentFieldIndex = i;
+        break;
+      }
+    }
+    cfsReader.close();
+    assertTrue("could not locate the 'content' field number in the _2.cfs segment", contentFieldIndex != -1);
+
+    String normSuffix = "s" + contentFieldIndex;
+
+    // Create a bogus separate norms file for a
+    // segment/field that actually has a separate norms file
+    // already:
+    copyFile(dir, "_2_1." + normSuffix, "_2_2." + normSuffix);
+
+    // Create a bogus separate norms file for a
+    // segment/field that actually has a separate norms file
+    // already, using the "not compound file" extension:
+    copyFile(dir, "_2_1." + normSuffix, "_2_2.f" + contentFieldIndex);
+
+    // Create a bogus separate norms file for a
+    // segment/field that does not have a separate norms
+    // file already:
+    copyFile(dir, "_2_1." + normSuffix, "_1_1." + normSuffix);
+
+    // Create a bogus separate norms file for a
+    // segment/field that does not have a separate norms
+    // file already using the "not compound file" extension:
+    copyFile(dir, "_2_1." + normSuffix, "_1_1.f" + contentFieldIndex);
+
+    // Create a bogus separate del file for a
+    // segment that already has a separate del file: 
+    copyFile(dir, "_0_1.del", "_0_2.del");
+
+    // Create a bogus separate del file for a
+    // segment that does not yet have a separate del file:
+    copyFile(dir, "_0_1.del", "_1_1.del");
+
+    // Create a bogus separate del file for a
+    // non-existent segment:
+    copyFile(dir, "_0_1.del", "_188_1.del");
+
+    // Create a bogus segment file:
+    copyFile(dir, "_0.cfs", "_188.cfs");
+
+    // Create a bogus fnm file when the CFS already exists:
+    copyFile(dir, "_0.cfs", "_0.fnm");
+    
+    // Create a deletable file:
+    copyFile(dir, "_0.cfs", "deletable");
+
+    // Create some old segments file:
+    copyFile(dir, "segments_2", "segments");
+    copyFile(dir, "segments_2", "segments_1");
+
+    // Create a bogus cfs file shadowing a non-cfs segment:
+    assertTrue(dir.fileExists("_3.fdt"));
+    assertTrue(!dir.fileExists("_3.cfs"));
+    copyFile(dir, "_1.cfs", "_3.cfs");
+    
+    String[] filesPre = dir.listAll();
+
+    // Open & close a writer: it should delete the bogus
+    // files created above and nothing more:
+    writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
+    writer.close();
+
+    String[] files2 = dir.listAll();
+    dir.close();
+
+    Arrays.sort(files);
+    Arrays.sort(files2);
+    
+    Set<String> dif = difFiles(files, files2);
+    
+    if (!Arrays.equals(files, files2)) {
+      fail("IndexFileDeleter failed to delete unreferenced extra files: should have deleted " + (filesPre.length-files.length) + " files but only deleted " + (filesPre.length - files2.length) + "; expected files:\n    " + asString(files) + "\n  actual files:\n    " + asString(files2)+"\ndif: "+dif);
+    }
+  }
+
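+  // symmetric difference of two file listings, used only for the failure message above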
+  private static Set<String> difFiles(String[] files1, String[] files2) {
+    Set<String> set1 = new HashSet<String>();
+    Set<String> set2 = new HashSet<String>();
+    Set<String> extra = new HashSet<String>();
+    
+    for (int x=0; x < files1.length; x++) {
+      set1.add(files1[x]);
+    }
+    for (int x=0; x < files2.length; x++) {
+      set2.add(files2[x]);
+    }
+    Iterator<String> i1 = set1.iterator();
+    while (i1.hasNext()) {
+      String o = i1.next();
+      if (!set2.contains(o)) {
+        extra.add(o);
+      }
+    }
+    Iterator<String> i2 = set2.iterator();
+    while (i2.hasNext()) {
+      String o = i2.next();
+      if (!set1.contains(o)) {
+        extra.add(o);
+      }
+    }
+    return extra;
+  }
+  
+  private String asString(String[] l) {
+    String s = "";
+    for(int i=0;i<l.length;i++) {
+      if (i > 0) {
+        s += "\n    ";
+      }
+      s += l[i];
+    }
+    return s;
+  }
+
+  public void copyFile(Directory dir, String src, String dest) throws IOException {
+    IndexInput in = dir.openInput(src);
+    IndexOutput out = dir.createOutput(dest);
+    byte[] b = new byte[1024];
+    long remainder = in.length();
+    while(remainder > 0) {
+      int len = (int) Math.min(b.length, remainder);
+      in.readBytes(b, 0, len);
+      out.writeBytes(b, len);
+      remainder -= len;
+    }
+    in.close();
+    out.close();
+  }
+
+  private void addDoc(IndexWriter writer, int id) throws IOException
+  {
+    Document doc = new Document();
+    doc.add(newField("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
+    doc.add(newField("id", Integer.toString(id), Field.Store.YES, Field.Index.NOT_ANALYZED));
+    writer.addDocument(doc);
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestIndexInput.java b/lucene/backwards/src/test/org/apache/lucene/index/TestIndexInput.java
new file mode 100644
index 0000000..4b82e5a
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestIndexInput.java
@@ -0,0 +1,150 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.store.IndexOutput;
+import org.apache.lucene.store.RAMDirectory;
+
+import java.io.IOException;
+
+public class TestIndexInput extends LuceneTestCase {
+
+  static final byte[] READ_TEST_BYTES = new byte[] { 
+    (byte) 0x80, 0x01,
+    (byte) 0xFF, 0x7F,
+    (byte) 0x80, (byte) 0x80, 0x01,
+    (byte) 0x81, (byte) 0x80, 0x01,
+    (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0x07,
+    (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0x07,
+    (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0x7F,
+    0x06, 'L', 'u', 'c', 'e', 'n', 'e',
+
+    // 2-byte UTF-8 (U+00BF "INVERTED QUESTION MARK") 
+    0x02, (byte) 0xC2, (byte) 0xBF,
+    0x0A, 'L', 'u', (byte) 0xC2, (byte) 0xBF, 
+          'c', 'e', (byte) 0xC2, (byte) 0xBF, 
+          'n', 'e',
+
+    // 3-byte UTF-8 (U+2620 "SKULL AND CROSSBONES") 
+    0x03, (byte) 0xE2, (byte) 0x98, (byte) 0xA0,
+    0x0C, 'L', 'u', (byte) 0xE2, (byte) 0x98, (byte) 0xA0,
+          'c', 'e', (byte) 0xE2, (byte) 0x98, (byte) 0xA0,
+          'n', 'e',
+
+    // surrogate pairs
+    // (U+1D11E "MUSICAL SYMBOL G CLEF")
+    // (U+1D160 "MUSICAL SYMBOL EIGHTH NOTE")
+    0x04, (byte) 0xF0, (byte) 0x9D, (byte) 0x84, (byte) 0x9E,
+    0x08, (byte) 0xF0, (byte) 0x9D, (byte) 0x84, (byte) 0x9E, 
+          (byte) 0xF0, (byte) 0x9D, (byte) 0x85, (byte) 0xA0, 
+    0x0E, 'L', 'u',
+          (byte) 0xF0, (byte) 0x9D, (byte) 0x84, (byte) 0x9E,
+          'c', 'e', 
+          (byte) 0xF0, (byte) 0x9D, (byte) 0x85, (byte) 0xA0, 
+          'n', 'e',  
+
+    // null bytes
+    0x01, 0x00,
+    0x08, 'L', 'u', 0x00, 'c', 'e', 0x00, 'n', 'e',
+  };
+  
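+  // reads back, in order, the values encoded in READ_TEST_BYTES: VInts, VLongs and UTF-8 strings (including supplementary characters and embedded nulls)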
+  private void checkReads(IndexInput is) throws IOException {
+    assertEquals(128,is.readVInt());
+    assertEquals(16383,is.readVInt());
+    assertEquals(16384,is.readVInt());
+    assertEquals(16385,is.readVInt());
+    assertEquals(Integer.MAX_VALUE, is.readVInt());
+    assertEquals((long) Integer.MAX_VALUE, is.readVLong());
+    assertEquals(Long.MAX_VALUE, is.readVLong());
+    assertEquals("Lucene",is.readString());
+
+    assertEquals("\u00BF",is.readString());
+    assertEquals("Lu\u00BFce\u00BFne",is.readString());
+
+    assertEquals("\u2620",is.readString());
+    assertEquals("Lu\u2620ce\u2620ne",is.readString());
+
+    assertEquals("\uD834\uDD1E",is.readString());
+    assertEquals("\uD834\uDD1E\uD834\uDD60",is.readString());
+    assertEquals("Lu\uD834\uDD1Ece\uD834\uDD60ne",is.readString());
+    
+    assertEquals("\u0000",is.readString());
+    assertEquals("Lu\u0000ce\u0000ne",is.readString());
+  }
+
+  // this test only checks BufferedIndexInput because MockIndexInput extends BufferedIndexInput
+  public void testBufferedIndexInputRead() throws IOException {
+    final IndexInput is = new MockIndexInput(READ_TEST_BYTES);
+    checkReads(is);
+    is.close();
+  }
+
+  // this test checks the raw IndexInput methods as it uses RAMIndexInput which extends IndexInput directly
+  public void testRawIndexInputRead() throws IOException {
+    final RAMDirectory dir = new RAMDirectory();
+    final IndexOutput os = dir.createOutput("foo");
+    os.writeBytes(READ_TEST_BYTES, READ_TEST_BYTES.length);
+    os.close();
+    final IndexInput is = dir.openInput("foo");
+    checkReads(is);
+    is.close();
+    dir.close();
+  }
+
+  /**
+   * Tests the legacy skipChars/readChars API: skips over chars in a stored string
+   * (including multi-byte UTF-8 chars) and verifies that the remaining chars read
+   * back correctly.
+   *
+   * @throws IOException on a low-level IO error
+   */
+  public void testSkipChars() throws IOException {
+    byte[] bytes = new byte[]{(byte) 0x80, 0x01,
+            (byte) 0xFF, 0x7F,
+            (byte) 0x80, (byte) 0x80, 0x01,
+            (byte) 0x81, (byte) 0x80, 0x01,
+            0x06, 'L', 'u', 'c', 'e', 'n', 'e',
+    };
+    String utf8Str = "\u0634\u1ea1";
+    byte [] utf8Bytes = utf8Str.getBytes("UTF-8");
+    byte [] theBytes = new byte[bytes.length + 1 + utf8Bytes.length];
+    System.arraycopy(bytes, 0, theBytes, 0, bytes.length);
+    theBytes[bytes.length] = (byte)utf8Str.length();//Add in the number of chars we are storing, which should fit in a byte for this test 
+    System.arraycopy(utf8Bytes, 0, theBytes, bytes.length + 1, utf8Bytes.length);
+    IndexInput is = new MockIndexInput(theBytes);
+    assertEquals(128, is.readVInt());
+    assertEquals(16383, is.readVInt());
+    assertEquals(16384, is.readVInt());
+    assertEquals(16385, is.readVInt());
+    int charsToRead = is.readVInt();//number of chars in the Lucene string
+    assertEquals(0x06, charsToRead);
+    is.skipChars(3);
+    char [] chars = new char[3];//there should be 6 chars remaining
+    is.readChars(chars, 0, 3);
+    String tmpStr = new String(chars);
+    assertEquals("ene", tmpStr);
+    //Now read the UTF8 stuff
+    charsToRead = is.readVInt() - 1;//since we are skipping one
+    is.skipChars(1);
+    assertEquals(utf8Str.length() - 1, charsToRead);
+    chars = new char[charsToRead];
+    is.readChars(chars, 0, charsToRead);
+    tmpStr = new String(chars);
+    assertEquals(utf8Str.substring(1), tmpStr);
+  }
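+
+  // Editor's note (not part of the original test): testSkipChars exercises the
+  // legacy char-oriented string format, i.e. a VInt char count followed by the
+  // chars encoded as (modified) UTF-8; skipChars is assumed to step over whole
+  // chars by inspecting each lead byte's length bits rather than decoding them.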
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestIndexReader.java b/lucene/backwards/src/test/org/apache/lucene/index/TestIndexReader.java
new file mode 100644
index 0000000..d2d3050
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestIndexReader.java
@@ -0,0 +1,1349 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.Set;
+import java.util.SortedSet;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.WhitespaceAnalyzer;
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldSelector;
+import org.apache.lucene.document.Fieldable;
+import org.apache.lucene.document.SetBasedFieldSelector;
+import org.apache.lucene.index.IndexReader.FieldOption;
+import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.search.FieldCache;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.store.AlreadyClosedException;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.LockObtainFailedException;
+import org.apache.lucene.store.MockDirectoryWrapper;
+import org.apache.lucene.store.NoSuchDirectoryException;
+import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.store.LockReleaseFailedException;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util._TestUtil;
+
+public class TestIndexReader extends LuceneTestCase {
+    
+    public void testCommitUserData() throws Exception {
+      Directory d = newDirectory();
+
+      Map<String,String> commitUserData = new HashMap<String,String>();
+      commitUserData.put("foo", "fighters");
+      
+      // set up writer
+      IndexWriter writer = new IndexWriter(d, newIndexWriterConfig(
+          TEST_VERSION_CURRENT, new MockAnalyzer(random))
+      .setMaxBufferedDocs(2));
+      for(int i=0;i<27;i++)
+        addDocumentWithFields(writer);
+      writer.close();
+      
+      IndexReader r = IndexReader.open(d, false);
+      r.deleteDocument(5);
+      r.flush(commitUserData);
+      r.close();
+      
+      SegmentInfos sis = new SegmentInfos();
+      sis.read(d);
+      IndexReader r2 = IndexReader.open(d, false);
+      IndexCommit c = r.getIndexCommit();
+      assertEquals(c.getUserData(), commitUserData);
+
+      assertEquals(sis.getCurrentSegmentFileName(), c.getSegmentsFileName());
+
+      assertTrue(c.equals(r.getIndexCommit()));
+
+      // Change the index
+      writer = new IndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT,
+          new MockAnalyzer(random)).setOpenMode(
+              OpenMode.APPEND).setMaxBufferedDocs(2));
+      for(int i=0;i<7;i++)
+        addDocumentWithFields(writer);
+      writer.close();
+
+      IndexReader r3 = r2.reopen();
+      assertFalse(c.equals(r3.getIndexCommit()));
+      assertFalse(r2.getIndexCommit().isOptimized());
+      r3.close();
+
+      writer = new IndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT,
+        new MockAnalyzer(random))
+        .setOpenMode(OpenMode.APPEND));
+      writer.optimize();
+      writer.close();
+
+      r3 = r2.reopen();
+      assertTrue(r3.getIndexCommit().isOptimized());
+      r2.close();
+      r3.close();
+      d.close();
+    }
+    
+    public void testIsCurrent() throws Exception {
+      Directory d = newDirectory();
+      IndexWriter writer = new IndexWriter(d, newIndexWriterConfig( 
+        TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+      addDocumentWithFields(writer);
+      writer.close();
+      // set up reader:
+      IndexReader reader = IndexReader.open(d, false);
+      assertTrue(reader.isCurrent());
+      // modify index by adding another document:
+      writer = new IndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT,
+          new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
+      addDocumentWithFields(writer);
+      writer.close();
+      assertFalse(reader.isCurrent());
+      // re-create index:
+      writer = new IndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT,
+          new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE));
+      addDocumentWithFields(writer);
+      writer.close();
+      assertFalse(reader.isCurrent());
+      reader.close();
+      d.close();
+    }
+
+    /**
+     * Tests the IndexReader.getFieldNames implementation
+     * @throws Exception on error
+     */
+    public void testGetFieldNames() throws Exception {
+        Directory d = newDirectory();
+        // set up writer
+        IndexWriter writer = new IndexWriter(
+            d,
+            newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
+        );
+
+        Document doc = new Document();
+        doc.add(new Field("keyword","test1", Field.Store.YES, Field.Index.NOT_ANALYZED));
+        doc.add(new Field("text","test1", Field.Store.YES, Field.Index.ANALYZED));
+        doc.add(new Field("unindexed","test1", Field.Store.YES, Field.Index.NO));
+        doc.add(new Field("unstored","test1", Field.Store.NO, Field.Index.ANALYZED));
+        writer.addDocument(doc);
+
+        writer.close();
+        // set up reader
+        IndexReader reader = IndexReader.open(d, false);
+        Collection<String> fieldNames = reader.getFieldNames(IndexReader.FieldOption.ALL);
+        assertTrue(fieldNames.contains("keyword"));
+        assertTrue(fieldNames.contains("text"));
+        assertTrue(fieldNames.contains("unindexed"));
+        assertTrue(fieldNames.contains("unstored"));
+        reader.close();
+        // add more documents
+        writer = new IndexWriter(
+            d,
+            newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+                setOpenMode(OpenMode.APPEND).
+                setMergePolicy(newLogMergePolicy())
+        );
+        // want to get some more segments here
+        int mergeFactor = ((LogMergePolicy) writer.getConfig().getMergePolicy()).getMergeFactor();
+        for (int i = 0; i < 5*mergeFactor; i++) {
+          doc = new Document();
+          doc.add(new Field("keyword","test1", Field.Store.YES, Field.Index.NOT_ANALYZED));
+          doc.add(new Field("text","test1", Field.Store.YES, Field.Index.ANALYZED));
+          doc.add(new Field("unindexed","test1", Field.Store.YES, Field.Index.NO));
+          doc.add(new Field("unstored","test1", Field.Store.NO, Field.Index.ANALYZED));
+          writer.addDocument(doc);
+        }
+        // new fields are in some different segments (we hope)
+        for (int i = 0; i < 5*mergeFactor; i++) {
+          doc = new Document();
+          doc.add(new Field("keyword2","test1", Field.Store.YES, Field.Index.NOT_ANALYZED));
+          doc.add(new Field("text2","test1", Field.Store.YES, Field.Index.ANALYZED));
+          doc.add(new Field("unindexed2","test1", Field.Store.YES, Field.Index.NO));
+          doc.add(new Field("unstored2","test1", Field.Store.NO, Field.Index.ANALYZED));
+          writer.addDocument(doc);
+        }
+        // new termvector fields
+        for (int i = 0; i < 5*mergeFactor; i++) {
+          doc = new Document();
+          doc.add(new Field("tvnot","tvnot", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO));
+          doc.add(new Field("termvector","termvector", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.YES));
+          doc.add(new Field("tvoffset","tvoffset", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_OFFSETS));
+          doc.add(new Field("tvposition","tvposition", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS));
+          doc.add(newField("tvpositionoffset","tvpositionoffset", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+          writer.addDocument(doc);
+        }
+        
+        writer.close();
+        // verify fields again
+        reader = IndexReader.open(d, false);
+        fieldNames = reader.getFieldNames(IndexReader.FieldOption.ALL);
+        assertEquals(13, fieldNames.size());    // the following fields
+        assertTrue(fieldNames.contains("keyword"));
+        assertTrue(fieldNames.contains("text"));
+        assertTrue(fieldNames.contains("unindexed"));
+        assertTrue(fieldNames.contains("unstored"));
+        assertTrue(fieldNames.contains("keyword2"));
+        assertTrue(fieldNames.contains("text2"));
+        assertTrue(fieldNames.contains("unindexed2"));
+        assertTrue(fieldNames.contains("unstored2"));
+        assertTrue(fieldNames.contains("tvnot"));
+        assertTrue(fieldNames.contains("termvector"));
+        assertTrue(fieldNames.contains("tvposition"));
+        assertTrue(fieldNames.contains("tvoffset"));
+        assertTrue(fieldNames.contains("tvpositionoffset"));
+        
+        // verify that only indexed fields were returned
+        fieldNames = reader.getFieldNames(IndexReader.FieldOption.INDEXED);
+        assertEquals(11, fieldNames.size());    // 6 original + the 5 termvector fields 
+        assertTrue(fieldNames.contains("keyword"));
+        assertTrue(fieldNames.contains("text"));
+        assertTrue(fieldNames.contains("unstored"));
+        assertTrue(fieldNames.contains("keyword2"));
+        assertTrue(fieldNames.contains("text2"));
+        assertTrue(fieldNames.contains("unstored2"));
+        assertTrue(fieldNames.contains("tvnot"));
+        assertTrue(fieldNames.contains("termvector"));
+        assertTrue(fieldNames.contains("tvposition"));
+        assertTrue(fieldNames.contains("tvoffset"));
+        assertTrue(fieldNames.contains("tvpositionoffset"));
+        
+        // verify that only unindexed fields were returned
+        fieldNames = reader.getFieldNames(IndexReader.FieldOption.UNINDEXED);
+        assertEquals(2, fieldNames.size());    // the following fields
+        assertTrue(fieldNames.contains("unindexed"));
+        assertTrue(fieldNames.contains("unindexed2"));
+                
+        // verify index term vector fields  
+        fieldNames = reader.getFieldNames(IndexReader.FieldOption.TERMVECTOR);
+        assertEquals(1, fieldNames.size());    // 1 field has term vector only
+        assertTrue(fieldNames.contains("termvector"));
+        
+        fieldNames = reader.getFieldNames(IndexReader.FieldOption.TERMVECTOR_WITH_POSITION);
+        assertEquals(1, fieldNames.size());    // only "tvposition" has positions-only term vectors
+        assertTrue(fieldNames.contains("tvposition"));
+        
+        fieldNames = reader.getFieldNames(IndexReader.FieldOption.TERMVECTOR_WITH_OFFSET);
+        assertEquals(1, fieldNames.size());    // only "tvoffset" has offsets-only term vectors
+        assertTrue(fieldNames.contains("tvoffset"));
+                
+        fieldNames = reader.getFieldNames(IndexReader.FieldOption.TERMVECTOR_WITH_POSITION_OFFSET);
+        assertEquals(1, fieldNames.size());    // only "tvpositionoffset" has term vectors with positions and offsets
+        assertTrue(fieldNames.contains("tvpositionoffset"));
+        reader.close();
+        d.close();
+    }
+
+  public void testTermVectors() throws Exception {
+    Directory d = newDirectory();
+    // set up writer
+    IndexWriter writer = new IndexWriter(
+        d,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+            setMergePolicy(newLogMergePolicy())
+    );
+    // want to get some more segments here
+    // new termvector fields
+    int mergeFactor = ((LogMergePolicy) writer.getConfig().getMergePolicy()).getMergeFactor();
+    for (int i = 0; i < 5 * mergeFactor; i++) {
+      Document doc = new Document();
+        doc.add(new Field("tvnot","one two two three three three", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO));
+        doc.add(new Field("termvector","one two two three three three", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.YES));
+        doc.add(new Field("tvoffset","one two two three three three", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_OFFSETS));
+        doc.add(new Field("tvposition","one two two three three three", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS));
+        doc.add(new Field("tvpositionoffset","one two two three three three", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+
+        writer.addDocument(doc);
+    }
+    writer.close();
+    IndexReader reader = IndexReader.open(d, false);
+    FieldSortedTermVectorMapper mapper = new FieldSortedTermVectorMapper(new TermVectorEntryFreqSortedComparator());
+    reader.getTermFreqVector(0, mapper);
+    Map<String,SortedSet<TermVectorEntry>> map = mapper.getFieldToTerms();
+    assertTrue("map is null and it shouldn't be", map != null);
+    assertTrue("map Size: " + map.size() + " is not: " + 4, map.size() == 4);
+    Set<TermVectorEntry> set = map.get("termvector");
+    for (Iterator<TermVectorEntry> iterator = set.iterator(); iterator.hasNext();) {
+      TermVectorEntry entry =  iterator.next();
+      assertTrue("entry is null and it shouldn't be", entry != null);
+      if (VERBOSE) System.out.println("Entry: " + entry);
+    }
+    reader.close();
+    d.close();
+  }
+
+  static void assertTermDocsCount(String msg,
+                                     IndexReader reader,
+                                     Term term,
+                                     int expected)
+    throws IOException
+    {
+        TermDocs tdocs = null;
+
+        try {
+            tdocs = reader.termDocs(term);
+            assertNotNull(msg + ", null TermDocs", tdocs);
+            int count = 0;
+            while(tdocs.next()) {
+                count++;
+            }
+            assertEquals(msg + ", count mismatch", expected, count);
+
+        } finally {
+            if (tdocs != null)
+                tdocs.close();
+        }
+
+    }
+
+    
+    public void testBinaryFields() throws IOException {
+        Directory dir = newDirectory();
+        byte[] bin = new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+        
+        IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
+        
+        for (int i = 0; i < 10; i++) {
+          addDoc(writer, "document number " + (i + 1));
+          addDocumentWithFields(writer);
+          addDocumentWithDifferentFields(writer);
+          addDocumentWithTermVectorFields(writer);
+        }
+        writer.close();
+        writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND).setMergePolicy(newLogMergePolicy()));
+        Document doc = new Document();
+        doc.add(new Field("bin1", bin));
+        doc.add(new Field("junk", "junk text", Field.Store.NO, Field.Index.ANALYZED));
+        writer.addDocument(doc);
+        writer.close();
+        IndexReader reader = IndexReader.open(dir, false);
+        doc = reader.document(reader.maxDoc() - 1);
+        Field[] fields = doc.getFields("bin1");
+        assertNotNull(fields);
+        assertEquals(1, fields.length);
+        Field b1 = fields[0];
+        assertTrue(b1.isBinary());
+        byte[] data1 = b1.getBinaryValue();
+        assertEquals(bin.length, b1.getBinaryLength());
+        for (int i = 0; i < bin.length; i++) {
+          assertEquals(bin[i], data1[i + b1.getBinaryOffset()]);
+        }
+        Set<String> lazyFields = new HashSet<String>();
+        lazyFields.add("bin1");
+        FieldSelector sel = new SetBasedFieldSelector(new HashSet<String>(), lazyFields);
+        doc = reader.document(reader.maxDoc() - 1, sel);
+        Fieldable[] fieldables = doc.getFieldables("bin1");
+        assertNotNull(fieldables);
+        assertEquals(1, fieldables.length);
+        Fieldable fb1 = fieldables[0];
+        assertTrue(fb1.isBinary());
+        assertEquals(bin.length, fb1.getBinaryLength());
+        data1 = fb1.getBinaryValue();
+        assertEquals(bin.length, fb1.getBinaryLength());
+        for (int i = 0; i < bin.length; i++) {
+          assertEquals(bin[i], data1[i + fb1.getBinaryOffset()]);
+        }
+        reader.close();
+        // force optimize
+
+
+        writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND).setMergePolicy(newLogMergePolicy()));
+        writer.optimize();
+        writer.close();
+        reader = IndexReader.open(dir, false);
+        doc = reader.document(reader.maxDoc() - 1);
+        fields = doc.getFields("bin1");
+        assertNotNull(fields);
+        assertEquals(1, fields.length);
+        b1 = fields[0];
+        assertTrue(b1.isBinary());
+        data1 = b1.getBinaryValue();
+        assertEquals(bin.length, b1.getBinaryLength());
+        for (int i = 0; i < bin.length; i++) {
+          assertEquals(bin[i], data1[i + b1.getBinaryOffset()]);
+        }
+        reader.close();
+        dir.close();
+    }
+
+    // Make sure attempts to make changes after the reader is
+    // closed throw AlreadyClosedException:
+    public void testChangesAfterClose() throws IOException {
+        Directory dir = newDirectory();
+
+        IndexWriter writer = null;
+        IndexReader reader = null;
+        Term searchTerm = new Term("content", "aaa");
+
+        //  add 11 documents with term : aaa
+        writer  = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+        for (int i = 0; i < 11; i++) {
+            addDoc(writer, searchTerm.text());
+        }
+        writer.close();
+
+        reader = IndexReader.open(dir, false);
+
+        // Close reader:
+        reader.close();
+
+        // Then, try to make changes:
+        try {
+          reader.deleteDocument(4);
+          fail("deleteDocument after close failed to throw IOException");
+        } catch (AlreadyClosedException e) {
+          // expected
+        }
+
+        try {
+          reader.setNorm(5, "aaa", 2.0f);
+          fail("setNorm after close failed to throw IOException");
+        } catch (AlreadyClosedException e) {
+          // expected
+        }
+
+        try {
+          reader.undeleteAll();
+          fail("undeleteAll after close failed to throw IOException");
+        } catch (AlreadyClosedException e) {
+          // expected
+        }
+        dir.close();
+    }
+
+    // Make sure we get lock obtain failed exception with 2 writers:
+    public void testLockObtainFailed() throws IOException {
+        Directory dir = newDirectory();
+
+        Term searchTerm = new Term("content", "aaa");
+
+        //  add 11 documents with term : aaa
+        IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+        writer.commit();
+        for (int i = 0; i < 11; i++) {
+            addDoc(writer, searchTerm.text());
+        }
+
+        // Create reader:
+        IndexReader reader = IndexReader.open(dir, false);
+
+        // Try to make changes
+        try {
+          reader.deleteDocument(4);
+          fail("deleteDocument should have hit LockObtainFailedException");
+        } catch (LockObtainFailedException e) {
+          // expected
+        }
+
+        try {
+          reader.setNorm(5, "aaa", 2.0f);
+          fail("setNorm should have hit LockObtainFailedException");
+        } catch (LockObtainFailedException e) {
+          // expected
+        }
+
+        try {
+          reader.undeleteAll();
+          fail("undeleteAll should have hit LockObtainFailedException");
+        } catch (LockObtainFailedException e) {
+          // expected
+        }
+        writer.close();
+        reader.close();
+        dir.close();
+    }
+
+    // Make sure you can set norms & commit even if a reader
+    // is open against the index:
+    public void testWritingNorms() throws IOException {
+        Directory dir = newDirectory();
+        IndexWriter writer;
+        IndexReader reader;
+        Term searchTerm = new Term("content", "aaa");
+
+        //  add 1 documents with term : aaa
+        writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+        addDoc(writer, searchTerm.text());
+        writer.close();
+
+        //  now open reader & set norm for doc 0
+        reader = IndexReader.open(dir, false);
+        reader.setNorm(0, "content", (float) 2.0);
+
+        // we should be holding the write lock now:
+        assertTrue("locked", IndexWriter.isLocked(dir));
+
+        reader.commit();
+
+        // we should not be holding the write lock now:
+        assertTrue("not locked", !IndexWriter.isLocked(dir));
+
+        // open a 2nd reader:
+        IndexReader reader2 = IndexReader.open(dir, false);
+
+        // set norm again for doc 0
+        reader.setNorm(0, "content", (float) 3.0);
+        assertTrue("locked", IndexWriter.isLocked(dir));
+
+        reader.close();
+
+        // we should not be holding the write lock now:
+        assertTrue("not locked", !IndexWriter.isLocked(dir));
+
+        reader2.close();
+        dir.close();
+    }
+
+
+    // Make sure you can set norms & commit, and there are
+    // no extra norms files left:
+    public void testWritingNormsNoReader() throws IOException {
+        Directory dir = newDirectory();
+        IndexWriter writer = null;
+        IndexReader reader = null;
+        Term searchTerm = new Term("content", "aaa");
+
+        //  add 1 documents with term : aaa
+        writer  = new IndexWriter(
+            dir,
+            newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+                setMergePolicy(newLogMergePolicy(false))
+        );
+        addDoc(writer, searchTerm.text());
+        writer.close();
+
+        //  now open reader & set norm for doc 0 (writes to
+        //  _0_1.s0)
+        reader = IndexReader.open(dir, false);
+        reader.setNorm(0, "content", (float) 2.0);
+        reader.close();
+        
+        //  now open reader again & set norm for doc 0 (writes to _0_2.s0)
+        reader = IndexReader.open(dir, false);
+        reader.setNorm(0, "content", (float) 2.0);
+        reader.close();
+        assertFalse("failed to remove first generation norms file on writing second generation",
+                    dir.fileExists("_0_1.s0"));
+        
+        dir.close();
+    }
+
+    /* ??? public void testOpenEmptyDirectory() throws IOException{
+      String dirName = "test.empty";
+      File fileDirName = new File(dirName);
+      if (!fileDirName.exists()) {
+        fileDirName.mkdir();
+      }
+      try {
+        IndexReader.open(fileDirName);
+        fail("opening IndexReader on empty directory failed to produce FileNotFoundException");
+      } catch (FileNotFoundException e) {
+        // GOOD
+      }
+      rmDir(fileDirName);
+    }*/
+    
+  public void testFilesOpenClose() throws IOException {
+        // Create initial data set
+        File dirFile = _TestUtil.getTempDir("TestIndexReader.testFilesOpenClose");
+        Directory dir = newFSDirectory(dirFile);
+        IndexWriter writer  = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+        addDoc(writer, "test");
+        writer.close();
+        dir.close();
+
+        // Try to erase the data - this ensures that the writer closed all files
+        _TestUtil.rmDir(dirFile);
+        dir = newFSDirectory(dirFile);
+
+        // Now create the data set again, just as before
+        writer  = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE));
+        addDoc(writer, "test");
+        writer.close();
+        dir.close();
+
+        // Now open existing directory and test that reader closes all files
+        dir = newFSDirectory(dirFile);
+        IndexReader reader1 = IndexReader.open(dir, false);
+        reader1.close();
+        dir.close();
+
+        // The following will fail if reader did not close
+        // all files
+        _TestUtil.rmDir(dirFile);
+    }
+
+    public void testLastModified() throws Exception {
+      for(int i=0;i<2;i++) {
+        final Directory dir = newDirectory();
+        assertFalse(IndexReader.indexExists(dir));
+        IndexWriter writer  = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE));
+        addDocumentWithFields(writer);
+        assertTrue(IndexWriter.isLocked(dir));		// writer open, so dir is locked
+        writer.close();
+        assertTrue(IndexReader.indexExists(dir));
+        IndexReader reader = IndexReader.open(dir, false);
+        assertFalse(IndexWriter.isLocked(dir));		// reader only, no lock
+        long version = IndexReader.lastModified(dir);
+        if (i == 1) {
+          long version2 = IndexReader.lastModified(dir);
+          assertEquals(version, version2);
+        }
+        reader.close();
+        // modify index and check that lastModified
+        // has advanced:
+        Thread.sleep(1000);
+
+        writer  = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE));
+        addDocumentWithFields(writer);
+        writer.close();
+        reader = IndexReader.open(dir, false);
+        assertTrue("old lastModified is " + version + "; new lastModified is " + IndexReader.lastModified(dir), version <= IndexReader.lastModified(dir));
+        reader.close();
+        dir.close();
+      }
+    }
+
+    public void testVersion() throws IOException {
+      Directory dir = newDirectory();
+      assertFalse(IndexReader.indexExists(dir));
+      IndexWriter writer  = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+      addDocumentWithFields(writer);
+      assertTrue(IndexWriter.isLocked(dir));		// writer open, so dir is locked
+      writer.close();
+      assertTrue(IndexReader.indexExists(dir));
+      IndexReader reader = IndexReader.open(dir, false);
+      assertFalse(IndexWriter.isLocked(dir));		// reader only, no lock
+      long version = IndexReader.getCurrentVersion(dir);
+      reader.close();
+      // modify index and check version has been
+      // incremented:
+      writer  = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE));
+      addDocumentWithFields(writer);
+      writer.close();
+      reader = IndexReader.open(dir, false);
+      assertTrue("old version is " + version + "; new version is " + IndexReader.getCurrentVersion(dir), version < IndexReader.getCurrentVersion(dir));
+      reader.close();
+      dir.close();
+    }
+
+    public void testLock() throws IOException {
+      Directory dir = newDirectory();
+      IndexWriter writer  = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+      addDocumentWithFields(writer);
+      writer.close();
+      writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
+      IndexReader reader = IndexReader.open(dir, false);
+      try {
+        reader.deleteDocument(0);
+        fail("expected lock");
+      } catch(IOException e) {
+        // expected exception
+      }
+      try {
+        IndexWriter.unlock(dir);		// this should not be done in the real world! 
+      } catch (LockReleaseFailedException lrfe) {
+        writer.close();
+      }
+      reader.deleteDocument(0);
+      reader.close();
+      writer.close();
+      dir.close();
+    }
+
+    public void testDocsOutOfOrderJIRA140() throws IOException {
+      Directory dir = newDirectory();      
+      IndexWriter writer  = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+      for(int i=0;i<11;i++) {
+        addDoc(writer, "aaa");
+      }
+      writer.close();
+      IndexReader reader = IndexReader.open(dir, false);
+
+      // Try to delete an invalid docId, yet, within range
+      // of the final bits of the BitVector:
+
+      boolean gotException = false;
+      try {
+        reader.deleteDocument(11);
+      } catch (ArrayIndexOutOfBoundsException e) {
+        gotException = true;
+      }
+      reader.close();
+
+      writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
+
+      // We must add more docs to get a new segment written
+      for(int i=0;i<11;i++) {
+        addDoc(writer, "aaa");
+      }
+
+      // Without the fix for LUCENE-140 this call will
+      // [incorrectly] hit a "docs out of order"
+      // IllegalStateException because above out-of-bounds
+      // deleteDocument corrupted the index:
+      writer.optimize();
+      writer.close();
+      if (!gotException) {
+        fail("delete of out-of-bounds doc number failed to hit exception");
+      }
+      dir.close();
+    }
+
+    public void testExceptionReleaseWriteLockJIRA768() throws IOException {
+
+      Directory dir = newDirectory();      
+      IndexWriter writer  = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+      addDoc(writer, "aaa");
+      writer.close();
+
+      IndexReader reader = IndexReader.open(dir, false);
+      try {
+        reader.deleteDocument(1);
+        fail("did not hit exception when deleting an invalid doc number");
+      } catch (ArrayIndexOutOfBoundsException e) {
+        // expected
+      }
+      reader.close();
+      if (IndexWriter.isLocked(dir)) {
+        fail("write lock is still held after close");
+      }
+
+      reader = IndexReader.open(dir, false);
+      try {
+        reader.setNorm(1, "content", (float) 2.0);
+        fail("did not hit exception when calling setNorm on an invalid doc number");
+      } catch (ArrayIndexOutOfBoundsException e) {
+        // expected
+      }
+      reader.close();
+      if (IndexWriter.isLocked(dir)) {
+        fail("write lock is still held after close");
+      }
+      dir.close();
+    }
+
+    private String arrayToString(String[] l) {
+      String s = "";
+      for(int i=0;i<l.length;i++) {
+        if (i > 0) {
+          s += "\n    ";
+        }
+        s += l[i];
+      }
+      return s;
+    }
+
+    public void testOpenReaderAfterDelete() throws IOException {
+      File dirFile = _TestUtil.getTempDir("deletetest");
+      Directory dir = newFSDirectory(dirFile);
+      try {
+        IndexReader.open(dir, false);
+        fail("expected FileNotFoundException");
+      } catch (FileNotFoundException e) {
+        // expected
+      }
+
+      dirFile.delete();
+
+      // Make sure we still get a FileNotFoundException (not an NPE):
+      try {
+        IndexReader.open(dir, false);
+        fail("expected FileNotFoundException");
+      } catch (FileNotFoundException e) {
+        // expected
+      }
+      
+      dir.close();
+    }
+
+    static void addDocumentWithFields(IndexWriter writer) throws IOException
+    {
+        Document doc = new Document();
+        doc.add(newField("keyword","test1", Field.Store.YES, Field.Index.NOT_ANALYZED));
+        doc.add(newField("text","test1", Field.Store.YES, Field.Index.ANALYZED));
+        doc.add(newField("unindexed","test1", Field.Store.YES, Field.Index.NO));
+        doc.add(newField("unstored","test1", Field.Store.NO, Field.Index.ANALYZED));
+        writer.addDocument(doc);
+    }
+
+    static void addDocumentWithDifferentFields(IndexWriter writer) throws IOException
+    {
+        Document doc = new Document();
+        doc.add(newField("keyword2","test1", Field.Store.YES, Field.Index.NOT_ANALYZED));
+        doc.add(newField("text2","test1", Field.Store.YES, Field.Index.ANALYZED));
+        doc.add(newField("unindexed2","test1", Field.Store.YES, Field.Index.NO));
+        doc.add(newField("unstored2","test1", Field.Store.NO, Field.Index.ANALYZED));
+        writer.addDocument(doc);
+    }
+
+    static void addDocumentWithTermVectorFields(IndexWriter writer) throws IOException
+    {
+        Document doc = new Document();
+        doc.add(newField("tvnot","tvnot", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO));
+        doc.add(newField("termvector","termvector", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.YES));
+        doc.add(newField("tvoffset","tvoffset", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_OFFSETS));
+        doc.add(newField("tvposition","tvposition", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS));
+        doc.add(newField("tvpositionoffset","tvpositionoffset", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+        
+        writer.addDocument(doc);
+    }
+    
+    static void addDoc(IndexWriter writer, String value) throws IOException {
+        Document doc = new Document();
+        doc.add(newField("content", value, Field.Store.NO, Field.Index.ANALYZED));
+        writer.addDocument(doc);
+    }
+
+    public static void assertIndexEquals(IndexReader index1, IndexReader index2) throws IOException {
+      assertEquals("IndexReaders have different values for numDocs.", index1.numDocs(), index2.numDocs());
+      assertEquals("IndexReaders have different values for maxDoc.", index1.maxDoc(), index2.maxDoc());
+      assertEquals("Only one IndexReader has deletions.", index1.hasDeletions(), index2.hasDeletions());
+      assertEquals("Only one index is optimized.", index1.isOptimized(), index2.isOptimized());
+      
+      // check field names
+      Collection<String> fields1 = index1.getFieldNames(FieldOption.ALL);
+      Collection<String> fields2 = index1.getFieldNames(FieldOption.ALL);
+      assertEquals("IndexReaders have different numbers of fields.", fields1.size(), fields2.size());
+      Iterator<String> it1 = fields1.iterator();
+      Iterator<String> it2 = fields1.iterator();
+      while (it1.hasNext()) {
+        assertEquals("Different field names.", it1.next(), it2.next());
+      }
+      
+      // check norms
+      it1 = fields1.iterator();
+      while (it1.hasNext()) {
+        String curField = it1.next();
+        byte[] norms1 = index1.norms(curField);
+        byte[] norms2 = index2.norms(curField);
+        if (norms1 != null && norms2 != null)
+        {
+          assertEquals(norms1.length, norms2.length);
+	        for (int i = 0; i < norms1.length; i++) {
+	          assertEquals("Norm different for doc " + i + " and field '" + curField + "'.", norms1[i], norms2[i]);
+	        }
+        }
+        else
+        {
+          assertSame(norms1, norms2);
+        }
+      }
+      
+      // check deletions
+      for (int i = 0; i < index1.maxDoc(); i++) {
+        assertEquals("Doc " + i + " only deleted in one index.", index1.isDeleted(i), index2.isDeleted(i));
+      }
+      
+      // check stored fields
+      for (int i = 0; i < index1.maxDoc(); i++) {
+        if (!index1.isDeleted(i)) {
+          Document doc1 = index1.document(i);
+          Document doc2 = index2.document(i);
+          List<Fieldable> fieldable1 = doc1.getFields();
+          List<Fieldable> fieldable2 = doc2.getFields();
+          assertEquals("Different numbers of fields for doc " + i + ".", fieldable1.size(), fieldable2.size());
+          Iterator<Fieldable> itField1 = fieldable1.iterator();
+          Iterator<Fieldable> itField2 = fieldable2.iterator();
+          while (itField1.hasNext()) {
+            Field curField1 = (Field) itField1.next();
+            Field curField2 = (Field) itField2.next();
+            assertEquals("Different fields names for doc " + i + ".", curField1.name(), curField2.name());
+            assertEquals("Different field values for doc " + i + ".", curField1.stringValue(), curField2.stringValue());
+          }          
+        }
+      }
+      
+      // check dictionary and posting lists
+      TermEnum enum1 = index1.terms();
+      TermEnum enum2 = index2.terms();
+      TermPositions tp1 = index1.termPositions();
+      TermPositions tp2 = index2.termPositions();
+      while(enum1.next()) {
+        assertTrue(enum2.next());
+        assertEquals("Different term in dictionary.", enum1.term(), enum2.term());
+        tp1.seek(enum1.term());
+        tp2.seek(enum1.term());
+        while(tp1.next()) {
+          assertTrue(tp2.next());
+          assertEquals("Different doc id in postinglist of term " + enum1.term() + ".", tp1.doc(), tp2.doc());
+          assertEquals("Different term frequence in postinglist of term " + enum1.term() + ".", tp1.freq(), tp2.freq());
+          for (int i = 0; i < tp1.freq(); i++) {
+            assertEquals("Different positions in postinglist of term " + enum1.term() + ".", tp1.nextPosition(), tp2.nextPosition());
+          }
+        }
+      }
+    }
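+
+    // Editor's usage sketch (hypothetical; dirA/dirB are placeholder directories):
+    //   IndexReader a = IndexReader.open(dirA, true);
+    //   IndexReader b = IndexReader.open(dirB, true);
+    //   assertIndexEquals(a, b);   // compares fields, norms, deletions, stored fields and postings
+    //   a.close();
+    //   b.close();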
+
+    public void testGetIndexCommit() throws IOException {
+
+      Directory d = newDirectory();
+
+      // set up writer
+      IndexWriter writer = new IndexWriter(
+          d,
+          newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+              setMaxBufferedDocs(2).
+              setMergePolicy(newLogMergePolicy(10))
+      );
+      for(int i=0;i<27;i++)
+        addDocumentWithFields(writer);
+      writer.close();
+
+      SegmentInfos sis = new SegmentInfos();
+      sis.read(d);
+      IndexReader r = IndexReader.open(d, false);
+      IndexCommit c = r.getIndexCommit();
+
+      assertEquals(sis.getCurrentSegmentFileName(), c.getSegmentsFileName());
+
+      assertTrue(c.equals(r.getIndexCommit()));
+
+      // Change the index
+      writer = new IndexWriter(
+          d,
+          newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+              setOpenMode(OpenMode.APPEND).
+              setMaxBufferedDocs(2).
+              setMergePolicy(newLogMergePolicy(10))
+      );
+      for(int i=0;i<7;i++)
+        addDocumentWithFields(writer);
+      writer.close();
+
+      IndexReader r2 = r.reopen();
+      assertFalse(c.equals(r2.getIndexCommit()));
+      assertFalse(r2.getIndexCommit().isOptimized());
+      r2.close();
+
+      writer = new IndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT,
+        new MockAnalyzer(random))
+        .setOpenMode(OpenMode.APPEND));
+      writer.optimize();
+      writer.close();
+
+      r2 = r.reopen();
+      assertTrue(r2.getIndexCommit().isOptimized());
+
+      r.close();
+      r2.close();
+      d.close();
+    }      
+
+    public void testReadOnly() throws Throwable {
+      Directory d = newDirectory();
+      IndexWriter writer = new IndexWriter(d, newIndexWriterConfig(
+        TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+      addDocumentWithFields(writer);
+      writer.commit();
+      addDocumentWithFields(writer);
+      writer.close();
+
+      IndexReader r = IndexReader.open(d, true);
+      try {
+        r.deleteDocument(0);
+        fail();
+      } catch (UnsupportedOperationException uoe) {
+        // expected
+      }
+
+      writer = new IndexWriter(
+          d,
+          newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+              setOpenMode(OpenMode.APPEND).
+              setMergePolicy(newLogMergePolicy(10))
+      );
+      addDocumentWithFields(writer);
+      writer.close();
+
+      // Make sure reopen is still readonly:
+      IndexReader r2 = r.reopen();
+      r.close();
+
+      assertFalse(r == r2);
+
+      try {
+        r2.deleteDocument(0);
+        fail();
+      } catch (UnsupportedOperationException uoe) {
+        // expected
+      }
+
+      writer = new IndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT,
+        new MockAnalyzer(random))
+        .setOpenMode(OpenMode.APPEND));
+      writer.optimize();
+      writer.close();
+
+      // Make sure reopen to a single segment is still readonly:
+      IndexReader r3 = r2.reopen();
+      assertFalse(r3 == r2);
+      r2.close();
+      
+      assertFalse(r == r2);
+
+      try {
+        r3.deleteDocument(0);
+        fail();
+      } catch (UnsupportedOperationException uoe) {
+        // expected
+      }
+
+      // Make sure write lock isn't held
+      writer = new IndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT,
+          new MockAnalyzer(random))
+      .setOpenMode(OpenMode.APPEND));
+      writer.close();
+
+      r3.close();
+      d.close();
+    }
+
+
+  // LUCENE-1474
+  public void testIndexReader() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
+        TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    writer.addDocument(createDocument("a"));
+    writer.addDocument(createDocument("b"));
+    writer.addDocument(createDocument("c"));
+    writer.close();
+    IndexReader reader = IndexReader.open(dir, false);
+    reader.deleteDocuments(new Term("id", "a"));
+    reader.flush();
+    reader.deleteDocuments(new Term("id", "b"));
+    reader.close();
+    IndexReader.open(dir,true).close();
+    dir.close();
+  }
+
+  static Document createDocument(String id) {
+    Document doc = new Document();
+    doc.add(newField("id", id, Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS));
+    return doc;
+  }
+
+  // LUCENE-1468 -- make sure on attempting to open an
+  // IndexReader on a non-existent directory, you get a
+  // good exception
+  public void testNoDir() throws Throwable {
+    Directory dir = newFSDirectory(_TestUtil.getTempDir("doesnotexist"));
+    try {
+      IndexReader.open(dir, true);
+      fail("did not hit expected exception");
+    } catch (NoSuchDirectoryException nsde) {
+      // expected
+    }
+    dir.close();
+  }
+
+  // LUCENE-1509
+  public void testNoDupCommitFileNames() throws Throwable {
+
+    Directory dir = newDirectory();
+    
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
+        TEST_VERSION_CURRENT, new MockAnalyzer(random))
+        .setMaxBufferedDocs(2));
+    writer.addDocument(createDocument("a"));
+    writer.addDocument(createDocument("a"));
+    writer.addDocument(createDocument("a"));
+    writer.close();
+    
+    Collection<IndexCommit> commits = IndexReader.listCommits(dir);
+    for (final IndexCommit commit : commits) {
+      Collection<String> files = commit.getFileNames();
+      HashSet<String> seen = new HashSet<String>();
+      for (final String fileName : files) { 
+        assertTrue("file " + fileName + " was duplicated", !seen.contains(fileName));
+        seen.add(fileName);
+      }
+    }
+
+    dir.close();
+  }
+
+  // LUCENE-1579: Ensure that on a cloned reader, segments
+  // reuse the doc values arrays in FieldCache
+  public void testFieldCacheReuseAfterClone() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    Document doc = new Document();
+    doc.add(newField("number", "17", Field.Store.NO, Field.Index.NOT_ANALYZED));
+    writer.addDocument(doc);
+    writer.close();
+
+    // Open reader
+    IndexReader r = SegmentReader.getOnlySegmentReader(dir);
+    final int[] ints = FieldCache.DEFAULT.getInts(r, "number");
+    assertEquals(1, ints.length);
+    assertEquals(17, ints[0]);
+
+    // Clone reader
+    IndexReader r2 = (IndexReader) r.clone();
+    r.close();
+    assertTrue(r2 != r);
+    final int[] ints2 = FieldCache.DEFAULT.getInts(r2, "number");
+    r2.close();
+
+    assertEquals(1, ints2.length);
+    assertEquals(17, ints2[0]);
+    assertTrue(ints == ints2);
+
+    dir.close();
+  }
+
+  // LUCENE-1579: Ensure that on a reopened reader, that any
+  // shared segments reuse the doc values arrays in
+  // FieldCache
+  public void testFieldCacheReuseAfterReopen() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter writer = new IndexWriter(
+        dir,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+            setMergePolicy(newLogMergePolicy(10))
+    );
+    Document doc = new Document();
+    doc.add(newField("number", "17", Field.Store.NO, Field.Index.NOT_ANALYZED));
+    ((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(10);
+    writer.addDocument(doc);
+    writer.commit();
+
+    // Open reader1
+    IndexReader r = IndexReader.open(dir, false);
+    IndexReader r1 = SegmentReader.getOnlySegmentReader(r);
+    final int[] ints = FieldCache.DEFAULT.getInts(r1, "number");
+    assertEquals(1, ints.length);
+    assertEquals(17, ints[0]);
+
+    // Add new segment
+    writer.addDocument(doc);
+    writer.commit();
+
+    // Reopen reader1 --> reader2
+    IndexReader r2 = r.reopen();
+    r.close();
+    IndexReader sub0 = r2.getSequentialSubReaders()[0];
+    final int[] ints2 = FieldCache.DEFAULT.getInts(sub0, "number");
+    r2.close();
+    assertTrue(ints == ints2);
+
+    writer.close();
+    dir.close();
+  }
+
+  // LUCENE-1586: getUniqueTermCount
+  public void testUniqueTermCount() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    Document doc = new Document();
+    doc.add(newField("field", "a b c d e f g h i j k l m n o p q r s t u v w x y z", Field.Store.NO, Field.Index.ANALYZED));
+    doc.add(newField("number", "0 1 2 3 4 5 6 7 8 9", Field.Store.NO, Field.Index.ANALYZED));
+    writer.addDocument(doc);
+    writer.addDocument(doc);
+    writer.commit();
+
+    IndexReader r = IndexReader.open(dir, false);
+    IndexReader r1 = SegmentReader.getOnlySegmentReader(r);
+    assertEquals(36, r1.getUniqueTermCount());
+    writer.addDocument(doc);
+    writer.commit();
+    IndexReader r2 = r.reopen();
+    r.close();
+    try {
+      r2.getUniqueTermCount();
+      fail("expected exception");
+    } catch (UnsupportedOperationException uoe) {
+      // expected
+    }
+    IndexReader[] subs = r2.getSequentialSubReaders();
+    for(int i=0;i<subs.length;i++) {
+      assertEquals(36, subs[i].getUniqueTermCount());
+    }
+    r2.close();
+    writer.close();
+    dir.close();
+  }
+
+  // LUCENE-1609: don't load terms index
+  public void testNoTermsIndex() throws Throwable {
+    Directory dir = newDirectory();
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    Document doc = new Document();
+    doc.add(newField("field", "a b c d e f g h i j k l m n o p q r s t u v w x y z", Field.Store.NO, Field.Index.ANALYZED));
+    doc.add(newField("number", "0 1 2 3 4 5 6 7 8 9", Field.Store.NO, Field.Index.ANALYZED));
+    writer.addDocument(doc);
+    writer.addDocument(doc);
+    writer.close();
+
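+    // Open with a termInfosIndexDivisor of -1, which tells IndexReader not to load
+    // the terms index at all: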
+    IndexReader r = IndexReader.open(dir, null, true, -1);
+    try {
+      r.docFreq(new Term("field", "f"));
+      fail("did not hit expected exception");
+    } catch (IllegalStateException ise) {
+      // expected
+    }
+    assertFalse(((SegmentReader) r.getSequentialSubReaders()[0]).termsIndexLoaded());
+
+    assertEquals(-1, ((SegmentReader) r.getSequentialSubReaders()[0]).getTermInfosIndexDivisor());
+    writer = new IndexWriter(
+        dir,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+            setMergePolicy(newLogMergePolicy(10))
+    );
+    writer.addDocument(doc);
+    writer.close();
+
+    // LUCENE-1718: ensure re-open carries over no terms index:
+    IndexReader r2 = r.reopen();
+    r.close();
+    IndexReader[] subReaders = r2.getSequentialSubReaders();
+    assertEquals(2, subReaders.length);
+    for(int i=0;i<2;i++) {
+      assertFalse(((SegmentReader) subReaders[i]).termsIndexLoaded());
+    }
+    r2.close();
+    dir.close();
+  }
+
+  // LUCENE-2046
+  public void testPrepareCommitIsCurrent() throws Throwable {
+    Directory dir = newDirectory();
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( 
+        TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    writer.commit();
+    Document doc = new Document();
+    writer.addDocument(doc);
+    IndexReader r = IndexReader.open(dir, true);
+    assertTrue(r.isCurrent());
+    writer.addDocument(doc);
+    writer.prepareCommit();
+    assertTrue(r.isCurrent());
+    IndexReader r2 = r.reopen();
+    assertTrue(r == r2);
+    writer.commit();
+    assertFalse(r.isCurrent());
+    writer.close();
+    r.close();
+    dir.close();
+  }
+  
+  // LUCENE-2753
+  public void testListCommits() throws Exception {
+    Directory dir = newDirectory();
+    SnapshotDeletionPolicy sdp = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( 
+        TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).setIndexDeletionPolicy(sdp));
+    writer.addDocument(new Document());
+    writer.commit();
+    sdp.snapshot("c1");
+    writer.addDocument(new Document());
+    writer.commit();
+    sdp.snapshot("c2");
+    writer.addDocument(new Document());
+    writer.commit();
+    sdp.snapshot("c3");
+    writer.close();
+    long currentGen = 0;
+    for (IndexCommit ic : IndexReader.listCommits(dir)) {
+      assertTrue("currentGen=" + currentGen + " commitGen=" + ic.getGeneration(), currentGen < ic.getGeneration());
+      currentGen = ic.getGeneration();
+    }
+    dir.close();
+  }
+
+  // LUCENE-2812
+  public void testIndexExists() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    writer.addDocument(new Document());
+    writer.prepareCommit();
+    assertFalse(IndexReader.indexExists(dir));
+    writer.close();
+    assertTrue(IndexReader.indexExists(dir));
+    dir.close();
+  }
+
+  // LUCENE-2474
+  public void testReaderFinishedListener() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
+    ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(3);
+    writer.setInfoStream(VERBOSE ? System.out : null);
+    writer.addDocument(new Document());
+    writer.commit();
+    writer.addDocument(new Document());
+    writer.commit();
+    final IndexReader reader = writer.getReader();
+    final int[] closeCount = new int[1];
+    final IndexReader.ReaderFinishedListener listener = new IndexReader.ReaderFinishedListener() {
+      public void finished(IndexReader reader) {
+        closeCount[0]++;
+      }
+    };
+
+    reader.addReaderFinishedListener(listener);
+
+    reader.close();
+
+    // Just the top reader
+    assertEquals(1, closeCount[0]);
+    writer.close();
+
+    // Now also the subs
+    assertEquals(3, closeCount[0]);
+
+    IndexReader reader2 = IndexReader.open(dir);
+    reader2.addReaderFinishedListener(listener);
+
+    closeCount[0] = 0;
+    reader2.close();
+    assertEquals(3, closeCount[0]);
+    dir.close();
+  }
+
+  public void testOOBDocID() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    writer.addDocument(new Document());
+    IndexReader r = writer.getReader();
+    writer.close();
+    r.document(0);
+    try {
+      r.document(1);
+      fail("did not hit exception");
+    } catch (IllegalArgumentException iae) {
+      // expected
+    }
+    r.close();
+    dir.close();
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestIndexReaderClone.java b/lucene/backwards/src/test/org/apache/lucene/index/TestIndexReaderClone.java
new file mode 100644
index 0000000..9fd6e3e
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestIndexReaderClone.java
@@ -0,0 +1,502 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.index.SegmentNorms;
+import org.apache.lucene.search.Similarity;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.LockObtainFailedException;
+import org.apache.lucene.util.LuceneTestCase;
+
+/**
+ * Tests cloning multiple types of readers, modifying the deletedDocs and norms,
+ * and verifies that the copy-on-write semantics of the deletedDocs and norms
+ * are implemented properly.
+ */
+public class TestIndexReaderClone extends LuceneTestCase {
+  
+  public void testCloneReadOnlySegmentReader() throws Exception {
+    final Directory dir1 = newDirectory();
+
+    TestIndexReaderReopen.createIndex(random, dir1, false);
+    IndexReader reader = IndexReader.open(dir1, false);
+    IndexReader readOnlyReader = reader.clone(true);
+    if (!isReadOnly(readOnlyReader)) {
+      fail("reader isn't read only");
+    }
+    if (deleteWorked(1, readOnlyReader)) {
+      fail("deleting from the original should not have worked");
+    }
+    reader.close();
+    readOnlyReader.close();
+    dir1.close();
+  }
+
+  // open non-readOnly reader1, clone to non-readOnly
+  // reader2, make sure we can change reader2
+  public void testCloneNoChangesStillReadOnly() throws Exception {
+    final Directory dir1 = newDirectory();
+
+    TestIndexReaderReopen.createIndex(random, dir1, true);
+    IndexReader r1 = IndexReader.open(dir1, false);
+    IndexReader r2 = r1.clone(false);
+    if (!deleteWorked(1, r2)) {
+      fail("deleting from the cloned should have worked");
+    }
+    r1.close();
+    r2.close();
+    dir1.close();
+  }
+  
+  // open non-readOnly reader1, clone to non-readOnly
+  // reader2, make sure we can change reader1
+  public void testCloneWriteToOrig() throws Exception {
+    final Directory dir1 = newDirectory();
+
+    TestIndexReaderReopen.createIndex(random, dir1, true);
+    IndexReader r1 = IndexReader.open(dir1, false);
+    IndexReader r2 = r1.clone(false);
+    if (!deleteWorked(1, r1)) {
+      fail("deleting from the original should have worked");
+    }
+    r1.close();
+    r2.close();
+    dir1.close();
+  }
+  
+  // open non-readOnly reader1, clone to non-readOnly
+  // reader2, make sure we can change reader2
+  public void testCloneWriteToClone() throws Exception {
+    final Directory dir1 = newDirectory();
+
+    TestIndexReaderReopen.createIndex(random, dir1, true);
+    IndexReader r1 = IndexReader.open(dir1, false);
+    IndexReader r2 = r1.clone(false);
+    if (!deleteWorked(1, r2)) {
+      fail("deleting from the original should have worked");
+    }
+    // should fail because reader1 holds the write lock
+    assertTrue("first reader should not be able to delete", !deleteWorked(1, r1));
+    r2.close();
+    // should fail because we are now stale (reader1
+    // committed changes)
+    assertTrue("first reader should not be able to delete", !deleteWorked(1, r1));
+    r1.close();
+
+    dir1.close();
+  }
+  
+  // create single-segment index, open non-readOnly
+  // SegmentReader, add docs, reopen to multireader, then do
+  // delete
+  public void testReopenSegmentReaderToMultiReader() throws Exception {
+    final Directory dir1 = newDirectory();
+
+    TestIndexReaderReopen.createIndex(random, dir1, false);
+    IndexReader reader1 = IndexReader.open(dir1, false);
+
+    TestIndexReaderReopen.modifyIndex(5, dir1);
+    
+    IndexReader reader2 = reader1.reopen();
+    assertTrue(reader1 != reader2);
+
+    assertTrue(deleteWorked(1, reader2));
+    reader1.close();
+    reader2.close();
+    dir1.close();
+  }
+
+  // open non-readOnly reader1, clone to readOnly reader2
+  public void testCloneWriteableToReadOnly() throws Exception {
+    final Directory dir1 = newDirectory();
+
+    TestIndexReaderReopen.createIndex(random, dir1, true);
+    IndexReader reader = IndexReader.open(dir1, false);
+    IndexReader readOnlyReader = reader.clone(true);
+    if (!isReadOnly(readOnlyReader)) {
+      fail("reader isn't read only");
+    }
+    if (deleteWorked(1, readOnlyReader)) {
+      fail("deleting from the original should not have worked");
+    }
+    // this readonly reader shouldn't have a write lock
+    if (readOnlyReader.hasChanges) {
+      fail("readOnlyReader has a write lock");
+    }
+    reader.close();
+    readOnlyReader.close();
+    dir1.close();
+  }
+
+  // open non-readOnly reader1, reopen to readOnly reader2
+  public void testReopenWriteableToReadOnly() throws Exception {
+    final Directory dir1 = newDirectory();
+
+    TestIndexReaderReopen.createIndex(random, dir1, true);
+    IndexReader reader = IndexReader.open(dir1, false);
+    final int docCount = reader.numDocs();
+    assertTrue(deleteWorked(1, reader));
+    assertEquals(docCount-1, reader.numDocs());
+
+    IndexReader readOnlyReader = reader.reopen(true);
+    if (!isReadOnly(readOnlyReader)) {
+      fail("reader isn't read only");
+    }
+    assertFalse(deleteWorked(1, readOnlyReader));
+    assertEquals(docCount-1, readOnlyReader.numDocs());
+    reader.close();
+    readOnlyReader.close();
+    dir1.close();
+  }
+
+  // open readOnly reader1, clone to non-readOnly reader2
+  public void testCloneReadOnlyToWriteable() throws Exception {
+    final Directory dir1 = newDirectory();
+
+    TestIndexReaderReopen.createIndex(random, dir1, true);
+    IndexReader reader1 = IndexReader.open(dir1, true);
+
+    IndexReader reader2 = reader1.clone(false);
+    if (isReadOnly(reader2)) {
+      fail("reader should not be read only");
+    }
+    assertFalse("deleting from the original reader should not have worked", deleteWorked(1, reader1));
+    // the writable clone shouldn't yet have acquired the write lock
+    if (reader2.hasChanges) {
+      fail("cloned reader should not have write lock");
+    }
+    assertTrue("deleting from the cloned reader should have worked", deleteWorked(1, reader2));
+    reader1.close();
+    reader2.close();
+    dir1.close();
+  }
+
+  // open non-readOnly reader1 on multi-segment index, then
+  // optimize the index, then clone to readOnly reader2
+  public void testReadOnlyCloneAfterOptimize() throws Exception {
+    final Directory dir1 = newDirectory();
+
+    TestIndexReaderReopen.createIndex(random, dir1, true);
+    IndexReader reader1 = IndexReader.open(dir1, false);
+    IndexWriter w = new IndexWriter(dir1, newIndexWriterConfig(
+        TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    w.optimize();
+    w.close();
+    IndexReader reader2 = reader1.clone(true);
+    assertTrue(isReadOnly(reader2));
+    reader1.close();
+    reader2.close();
+    dir1.close();
+  }
+  
+  private static boolean deleteWorked(int doc, IndexReader r) {
+    boolean exception = false;
+    try {
+      // the delete throws if this reader is read-only, stale, or cannot obtain the write lock
+      r.deleteDocument(doc);
+    } catch (Exception ex) {
+      exception = true;
+    }
+    return !exception;
+  }
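+
+  // Note on the copy-on-write contract exercised by these tests (a descriptive
+  // summary of the behavior asserted below, not additional test logic):
+  // deleteDocument() is expected to throw when the reader is read-only, when
+  // another reader already holds the write lock, or when the reader has become
+  // stale because changes were committed after it was opened; deleteWorked()
+  // above simply reports whether such an exception occurred.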
+  
+  public void testCloneReadOnlyDirectoryReader() throws Exception {
+    final Directory dir1 = newDirectory();
+
+    TestIndexReaderReopen.createIndex(random, dir1, true);
+    IndexReader reader = IndexReader.open(dir1, false);
+    IndexReader readOnlyReader = reader.clone(true);
+    if (!isReadOnly(readOnlyReader)) {
+      fail("reader isn't read only");
+    }
+    reader.close();
+    readOnlyReader.close();
+    dir1.close();
+  }
+
+  public static boolean isReadOnly(IndexReader r) {
+    if (r instanceof ReadOnlySegmentReader
+        || r instanceof ReadOnlyDirectoryReader)
+      return true;
+    return false;
+  }
+
+  public void testParallelReader() throws Exception {
+    final Directory dir1 = newDirectory();
+    TestIndexReaderReopen.createIndex(random, dir1, true);
+    final Directory dir2 = newDirectory();
+    TestIndexReaderReopen.createIndex(random, dir2, true);
+    IndexReader r1 = IndexReader.open(dir1, false);
+    IndexReader r2 = IndexReader.open(dir2, false);
+
+    ParallelReader pr1 = new ParallelReader();
+    pr1.add(r1);
+    pr1.add(r2);
+
+    performDefaultTests(pr1);
+    pr1.close();
+    dir1.close();
+    dir2.close();
+  }
+
+  /**
+   * 1. Get a norm from the original reader.
+   * 2. Clone the original reader.
+   * 3. Delete a document and set the norm of the cloned reader.
+   * 4. Verify the norms are not the same on each reader.
+   * 5. Verify the doc deleted is only in the cloned reader.
+   * 6. Try to delete a document in the original reader; an exception should be thrown.
+   * 
+   * @param r1 IndexReader to perform tests on
+   * @throws Exception
+   */
+  private void performDefaultTests(IndexReader r1) throws Exception {
+    float norm1 = Similarity.getDefault().decodeNormValue(r1.norms("field1")[4]);
+
+    IndexReader pr1Clone = (IndexReader) r1.clone();
+    pr1Clone.deleteDocument(10);
+    pr1Clone.setNorm(4, "field1", 0.5f);
+    assertTrue(Similarity.getDefault().decodeNormValue(r1.norms("field1")[4]) == norm1);
+    assertTrue(Similarity.getDefault().decodeNormValue(pr1Clone.norms("field1")[4]) != norm1);
+
+    assertTrue(!r1.isDeleted(10));
+    assertTrue(pr1Clone.isDeleted(10));
+
+    // try to update the original reader, which should throw an exception
+    try {
+      r1.deleteDocument(11);
+      fail("Tried to delete doc 11 and an exception should have been thrown");
+    } catch (Exception exception) {
+      // expected
+    }
+    pr1Clone.close();
+  }
+
+  public void testMixedReaders() throws Exception {
+    final Directory dir1 = newDirectory();
+    TestIndexReaderReopen.createIndex(random, dir1, true);
+    final Directory dir2 = newDirectory();
+    TestIndexReaderReopen.createIndex(random, dir2, true);
+    IndexReader r1 = IndexReader.open(dir1, false);
+    IndexReader r2 = IndexReader.open(dir2, false);
+
+    MultiReader multiReader = new MultiReader(new IndexReader[] { r1, r2 });
+    performDefaultTests(multiReader);
+    multiReader.close();
+    dir1.close();
+    dir2.close();
+  }
+
+  public void testSegmentReaderUndeleteall() throws Exception {
+    final Directory dir1 = newDirectory();
+    TestIndexReaderReopen.createIndex(random, dir1, false);
+    SegmentReader origSegmentReader = SegmentReader.getOnlySegmentReader(dir1);
+    origSegmentReader.deleteDocument(10);
+    assertDelDocsRefCountEquals(1, origSegmentReader);
+    origSegmentReader.undeleteAll();
+    assertNull(origSegmentReader.deletedDocsRef);
+    origSegmentReader.close();
+    // need to test norms?
+    dir1.close();
+  }
+  
+  public void testSegmentReaderCloseReferencing() throws Exception {
+    final Directory dir1 = newDirectory();
+    TestIndexReaderReopen.createIndex(random, dir1, false);
+    SegmentReader origSegmentReader = SegmentReader.getOnlySegmentReader(dir1);
+    origSegmentReader.deleteDocument(1);
+    origSegmentReader.setNorm(4, "field1", 0.5f);
+
+    SegmentReader clonedSegmentReader = (SegmentReader) origSegmentReader
+        .clone();
+    assertDelDocsRefCountEquals(2, origSegmentReader);
+    origSegmentReader.close();
+    assertDelDocsRefCountEquals(1, origSegmentReader);
+    // check the norm refs
+    SegmentNorms norm = clonedSegmentReader.norms.get("field1");
+    assertEquals(1, norm.bytesRef().get());
+    clonedSegmentReader.close();
+    dir1.close();
+  }
+  
+  public void testSegmentReaderDelDocsReferenceCounting() throws Exception {
+    final Directory dir1 = newDirectory();
+    TestIndexReaderReopen.createIndex(random, dir1, false);
+
+    IndexReader origReader = IndexReader.open(dir1, false);
+    SegmentReader origSegmentReader = SegmentReader.getOnlySegmentReader(origReader);
+    // deletedDocsRef should be null because nothing has updated yet
+    assertNull(origSegmentReader.deletedDocsRef);
+
+    // we deleted a document, so there is now a deletedDocs bitvector and a
+    // reference to it
+    origReader.deleteDocument(1);
+    assertDelDocsRefCountEquals(1, origSegmentReader);
+
+    // the cloned segmentreader should have 2 references, 1 to itself, and 1 to
+    // the original segmentreader
+    IndexReader clonedReader = (IndexReader) origReader.clone();
+    SegmentReader clonedSegmentReader = SegmentReader.getOnlySegmentReader(clonedReader);
+    assertDelDocsRefCountEquals(2, origSegmentReader);
+    // deleting a document through the clone creates a new deletedDocs bitvector,
+    // so each reader's ref count goes back to 1
+    clonedReader.deleteDocument(2);
+    assertDelDocsRefCountEquals(1, origSegmentReader);
+    assertDelDocsRefCountEquals(1, clonedSegmentReader);
+
+    // make sure the deletedocs objects are different (copy
+    // on write)
+    assertTrue(origSegmentReader.deletedDocs != clonedSegmentReader.deletedDocs);
+
+    assertDocDeleted(origSegmentReader, clonedSegmentReader, 1);
+    assertTrue(!origSegmentReader.isDeleted(2)); // doc 2 should not be deleted
+                                                  // in original segmentreader
+    assertTrue(clonedSegmentReader.isDeleted(2)); // doc 2 should be deleted in
+                                                  // cloned segmentreader
+
+    // deleting a doc from the original segmentreader should throw an exception
+    try {
+      origReader.deleteDocument(4);
+      fail("expected exception");
+    } catch (LockObtainFailedException lbfe) {
+      // expected
+    }
+
+    origReader.close();
+    // the original reader is now closed; make sure that does not affect the
+    // clonedSegmentReader
+    clonedReader.deleteDocument(3);
+    clonedReader.flush();
+    assertDelDocsRefCountEquals(1, clonedSegmentReader);
+
+    // test a reopened reader
+    IndexReader reopenedReader = clonedReader.reopen();
+    IndexReader cloneReader2 = (IndexReader) reopenedReader.clone();
+    SegmentReader cloneSegmentReader2 = SegmentReader.getOnlySegmentReader(cloneReader2);
+    assertDelDocsRefCountEquals(2, cloneSegmentReader2);
+    clonedReader.close();
+    reopenedReader.close();
+    cloneReader2.close();
+
+    dir1.close();
+  }
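+
+  // A short summary of the deletedDocs ref counting demonstrated above
+  // (descriptive only): the first delete through the original reader creates a
+  // deletedDocs BitVector with ref count 1; cloning shares that vector (ref
+  // count 2); and the first delete through the clone copies it, leaving each
+  // reader with its own vector at ref count 1.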
+
+  // LUCENE-1648
+  public void testCloneWithDeletes() throws Throwable {
+    final Directory dir1 = newDirectory();
+    TestIndexReaderReopen.createIndex(random, dir1, false);
+    IndexReader origReader = IndexReader.open(dir1, false);
+    origReader.deleteDocument(1);
+
+    IndexReader clonedReader = (IndexReader) origReader.clone();
+    origReader.close();
+    clonedReader.close();
+
+    IndexReader r = IndexReader.open(dir1, false);
+    assertTrue(r.isDeleted(1));
+    r.close();
+    dir1.close();
+  }
+
+  // LUCENE-1648
+  public void testCloneWithSetNorm() throws Throwable {
+    final Directory dir1 = newDirectory();
+    TestIndexReaderReopen.createIndex(random, dir1, false);
+    IndexReader orig = IndexReader.open(dir1, false);
+    orig.setNorm(1, "field1", 17.0f);
+    final byte encoded = Similarity.getDefault().encodeNormValue(17.0f);
+    assertEquals(encoded, orig.norms("field1")[1]);
+
+    // clone the reader, then close both; the norm change made on the original
+    // should survive in the index
+    IndexReader clonedReader = (IndexReader) orig.clone();
+    orig.close();
+    clonedReader.close();
+
+    IndexReader r = IndexReader.open(dir1, false);
+    assertEquals(encoded, r.norms("field1")[1]);
+    r.close();
+    dir1.close();
+  }
+
+  private void assertDocDeleted(SegmentReader reader, SegmentReader reader2,
+      int doc) {
+    assertEquals(reader.isDeleted(doc), reader2.isDeleted(doc));
+  }
+
+  private void assertDelDocsRefCountEquals(int refCount, SegmentReader reader) {
+    assertEquals(refCount, reader.deletedDocsRef.get());
+  }
+  
+  public void testCloneSubreaders() throws Exception {
+    final Directory dir1 = newDirectory();
+ 
+    TestIndexReaderReopen.createIndex(random, dir1, true);
+    IndexReader reader = IndexReader.open(dir1, false);
+    reader.deleteDocument(1); // acquire write lock
+    IndexReader[] subs = reader.getSequentialSubReaders();
+    assert subs.length > 1;
+    
+    IndexReader[] clones = new IndexReader[subs.length];
+    for (int x=0; x < subs.length; x++) {
+      clones[x] = (IndexReader) subs[x].clone();
+    }
+    reader.close();
+    for (int x=0; x < subs.length; x++) {
+      clones[x].close();
+    }
+    dir1.close();
+  }
+
+  public void testLucene1516Bug() throws Exception {
+    final Directory dir1 = newDirectory();
+    TestIndexReaderReopen.createIndex(random, dir1, false);
+    IndexReader r1 = IndexReader.open(dir1, false);
+    r1.incRef();
+    IndexReader r2 = r1.clone(false);
+    r1.deleteDocument(5);
+    r1.decRef();
+    
+    r1.incRef();
+    
+    r2.close();
+    r1.decRef();
+    r1.close();
+    dir1.close();
+  }
+
+  public void testCloseStoredFields() throws Exception {
+    final Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(
+        dir,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+            setMergePolicy(newLogMergePolicy(false))
+    );
+    Document doc = new Document();
+    doc.add(newField("field", "yes it's stored", Field.Store.YES, Field.Index.ANALYZED));
+    w.addDocument(doc);
+    w.close();
+    IndexReader r1 = IndexReader.open(dir, false);
+    IndexReader r2 = r1.clone(false);
+    r1.close();
+    r2.close();
+    dir.close();
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java b/lucene/backwards/src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java
new file mode 100644
index 0000000..0bce457
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java
@@ -0,0 +1,326 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Random;
+
+import java.util.concurrent.atomic.AtomicInteger;
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.Field.Index;
+import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.index.SegmentNorms;
+import org.apache.lucene.search.DefaultSimilarity;
+import org.apache.lucene.search.Similarity;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+
+/**
+ * Tests cloning IndexReader norms
+ */
+public class TestIndexReaderCloneNorms extends LuceneTestCase {
+
+  private class SimilarityOne extends DefaultSimilarity {
+    @Override
+    public float computeNorm(String fieldName, FieldInvertState state) {
+      // disable the length norm
+      return state.getBoost();
+    }
+  }
+
+  private static final int NUM_FIELDS = 10;
+
+  private Similarity similarityOne;
+
+  private Analyzer anlzr;
+
+  private int numDocNorms;
+
+  private ArrayList<Float> norms;
+
+  private ArrayList<Float> modifiedNorms;
+
+  private float lastNorm = 0;
+
+  private float normDelta = (float) 0.001;
+
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    similarityOne = new SimilarityOne();
+    anlzr = new MockAnalyzer(random);
+  }
+  
+  /**
+   * Test that norm values are preserved as the index is maintained, including
+   * separate norms, merging indexes with separate norms, and optimize.
+   */
+  public void testNorms() throws IOException {
+    // test with a single index: index1
+    Directory dir1 = newDirectory();
+    IndexWriter.unlock(dir1);
+
+    norms = new ArrayList<Float>();
+    modifiedNorms = new ArrayList<Float>();
+
+    createIndex(random, dir1);
+    doTestNorms(random, dir1);
+
+    // test with a single index: index2
+    ArrayList<Float> norms1 = norms;
+    ArrayList<Float> modifiedNorms1 = modifiedNorms;
+    int numDocNorms1 = numDocNorms;
+
+    norms = new ArrayList<Float>();
+    modifiedNorms = new ArrayList<Float>();
+    numDocNorms = 0;
+
+    Directory dir2 = newDirectory();
+
+    createIndex(random, dir2);
+    doTestNorms(random, dir2);
+
+    // add index1 and index2 to a third index: index3
+    Directory dir3 = newDirectory();
+
+    createIndex(random, dir3);
+    IndexWriter iw = new IndexWriter(dir3, newIndexWriterConfig(
+        TEST_VERSION_CURRENT, anlzr).setOpenMode(OpenMode.APPEND)
+                                     .setMaxBufferedDocs(5).setMergePolicy(newLogMergePolicy(3)));
+    iw.addIndexes(new Directory[] { dir1, dir2 });
+    iw.optimize();
+    iw.close();
+
+    norms1.addAll(norms);
+    norms = norms1;
+    modifiedNorms1.addAll(modifiedNorms);
+    modifiedNorms = modifiedNorms1;
+    numDocNorms += numDocNorms1;
+
+    // test with index3
+    verifyIndex(dir3);
+    doTestNorms(random, dir3);
+
+    // now with optimize
+    iw = new IndexWriter(dir3, newIndexWriterConfig( TEST_VERSION_CURRENT,
+                                                     anlzr).setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(5).setMergePolicy(newLogMergePolicy(3)));
+    iw.optimize();
+    iw.close();
+    verifyIndex(dir3);
+
+    dir1.close();
+    dir2.close();
+    dir3.close();
+  }
+
+  // try cloning and reopening the norms
+  private void doTestNorms(Random random, Directory dir) throws IOException {
+    addDocs(random, dir, 12, true);
+    IndexReader ir = IndexReader.open(dir, false);
+    verifyIndex(ir);
+    modifyNormsForF1(ir);
+    IndexReader irc = (IndexReader) ir.clone();// IndexReader.open(dir, false);//ir.clone();
+    verifyIndex(irc);
+
+    modifyNormsForF1(irc);
+
+    IndexReader irc3 = (IndexReader) irc.clone();
+    verifyIndex(irc3);
+    modifyNormsForF1(irc3);
+    verifyIndex(irc3);
+    irc3.flush();
+    irc3.close();
+    
+    irc.close();
+    ir.close();
+  }
+  
+  public void testNormsClose() throws IOException { 
+    Directory dir1 = newDirectory(); 
+    TestIndexReaderReopen.createIndex(random, dir1, false);
+    SegmentReader reader1 = SegmentReader.getOnlySegmentReader(dir1);
+    reader1.norms("field1");
+    SegmentNorms r1norm = reader1.norms.get("field1");
+    AtomicInteger r1BytesRef = r1norm.bytesRef();
+    SegmentReader reader2 = (SegmentReader)reader1.clone();
+    assertEquals(2, r1norm.bytesRef().get());
+    reader1.close();
+    assertEquals(1, r1BytesRef.get());
+    reader2.norms("field1");
+    reader2.close();
+    dir1.close();
+  }
+  
+  public void testNormsRefCounting() throws IOException { 
+    Directory dir1 = newDirectory(); 
+    TestIndexReaderReopen.createIndex(random, dir1, false);
+    IndexReader reader1 = IndexReader.open(dir1, false);
+        
+    IndexReader reader2C = (IndexReader) reader1.clone();
+    SegmentReader segmentReader2C = SegmentReader.getOnlySegmentReader(reader2C);
+    segmentReader2C.norms("field1"); // load the norms for the field
+    SegmentNorms reader2CNorm = segmentReader2C.norms.get("field1");
+    assertTrue("reader2CNorm.bytesRef()=" + reader2CNorm.bytesRef(), reader2CNorm.bytesRef().get() == 2);
+    
+    IndexReader reader3C = (IndexReader) reader2C.clone();
+    SegmentReader segmentReader3C = SegmentReader.getOnlySegmentReader(reader3C);
+    SegmentNorms reader3CCNorm = segmentReader3C.norms.get("field1");
+    assertEquals(3, reader3CCNorm.bytesRef().get());
+    
+    // edit a norm and the refcount should be 1
+    IndexReader reader4C = (IndexReader) reader3C.clone();
+    SegmentReader segmentReader4C = SegmentReader.getOnlySegmentReader(reader4C);
+    assertEquals(4, reader3CCNorm.bytesRef().get());
+    reader4C.setNorm(5, "field1", 0.33f);
+    
+    // generate a cannot-update exception in reader3C
+    try {
+      reader3C.setNorm(1, "field1", 0.99f);
+      fail("did not hit expected exception");
+    } catch (Exception ex) {
+      // expected
+    }
+    
+    // norm values should be different 
+    assertTrue(Similarity.getDefault().decodeNormValue(segmentReader3C.norms("field1")[5]) 
+    		!= Similarity.getDefault().decodeNormValue(segmentReader4C.norms("field1")[5]));
+    SegmentNorms reader4CCNorm = segmentReader4C.norms.get("field1");
+    assertEquals(3, reader3CCNorm.bytesRef().get());
+    assertEquals(1, reader4CCNorm.bytesRef().get());
+        
+    IndexReader reader5C = (IndexReader) reader4C.clone();
+    SegmentReader segmentReader5C = SegmentReader.getOnlySegmentReader(reader5C);
+    SegmentNorms reader5CCNorm = segmentReader5C.norms.get("field1");
+    reader5C.setNorm(5, "field1", 0.7f);
+    assertEquals(1, reader5CCNorm.bytesRef().get());
+
+    reader5C.close();
+    reader4C.close();
+    reader3C.close();
+    reader2C.close();
+    reader1.close();
+    dir1.close();
+  }
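+
+  // The norms follow the same copy-on-write pattern as deletedDocs (a summary of
+  // the assertions above, descriptive only): every clone increments the shared
+  // SegmentNorms ref count, and the first setNorm() on a clone copies the norm
+  // bytes, dropping that clone to its own private copy with a ref count of 1.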
+  
+  private void createIndex(Random random, Directory dir) throws IOException {
+    IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(
+        TEST_VERSION_CURRENT, anlzr).setOpenMode(OpenMode.CREATE)
+                                     .setMaxBufferedDocs(5).setSimilarity(similarityOne).setMergePolicy(newLogMergePolicy()));
+    setUseCompoundFile(iw.getConfig().getMergePolicy(), true);
+    setMergeFactor(iw.getConfig().getMergePolicy(), 3);
+    iw.close();
+  }
+
+  private void modifyNormsForF1(IndexReader ir) throws IOException {
+    int n = ir.maxDoc();
+    // System.out.println("modifyNormsForF1 maxDoc: "+n);
+    for (int i = 0; i < n; i += 3) { // modify for every third doc
+      int k = (i * 3) % modifiedNorms.size();
+      float origNorm =  modifiedNorms.get(i).floatValue();
+      float newNorm =  modifiedNorms.get(k).floatValue();
+      // System.out.println("Modifying: for "+i+" from "+origNorm+" to
+      // "+newNorm);
+      // System.out.println(" and: for "+k+" from "+newNorm+" to "+origNorm);
+      modifiedNorms.set(i, Float.valueOf(newNorm));
+      modifiedNorms.set(k, Float.valueOf(origNorm));
+      ir.setNorm(i, "f" + 1, newNorm);
+      ir.setNorm(k, "f" + 1, origNorm);
+      // System.out.println("setNorm i: "+i);
+      // break;
+    }
+    // ir.close();
+  }
+
+  private void verifyIndex(Directory dir) throws IOException {
+    IndexReader ir = IndexReader.open(dir, false);
+    verifyIndex(ir);
+    ir.close();
+  }
+
+  private void verifyIndex(IndexReader ir) throws IOException {
+    for (int i = 0; i < NUM_FIELDS; i++) {
+      String field = "f" + i;
+      byte b[] = ir.norms(field);
+      assertEquals("number of norms mismatches", numDocNorms, b.length);
+      ArrayList<Float> storedNorms = (i == 1 ? modifiedNorms : norms);
+      for (int j = 0; j < b.length; j++) {
+        float norm = Similarity.getDefault().decodeNormValue(b[j]);
+        float norm1 =  storedNorms.get(j).floatValue();
+        assertEquals("stored norm value of " + field + " for doc " + j + " is "
+            + norm + " - a mismatch!", norm, norm1, 0.000001);
+      }
+    }
+  }
+
+  private void addDocs(Random random, Directory dir, int ndocs, boolean compound)
+      throws IOException {
+    IndexWriterConfig conf = newIndexWriterConfig(
+            TEST_VERSION_CURRENT, anlzr).setOpenMode(OpenMode.APPEND)
+      .setMaxBufferedDocs(5).setSimilarity(similarityOne).setMergePolicy(newLogMergePolicy());
+    LogMergePolicy lmp = (LogMergePolicy) conf.getMergePolicy();
+    lmp.setMergeFactor(3);
+    lmp.setUseCompoundFile(compound);
+    IndexWriter iw = new IndexWriter(dir, conf);
+    for (int i = 0; i < ndocs; i++) {
+      iw.addDocument(newDoc());
+    }
+    iw.close();
+  }
+
+  // create the next document
+  private Document newDoc() {
+    Document d = new Document();
+    float boost = nextNorm();
+    for (int i = 0; i < 10; i++) {
+      Field f = newField("f" + i, "v" + i, Store.NO, Index.NOT_ANALYZED);
+      f.setBoost(boost);
+      d.add(f);
+    }
+    return d;
+  }
+
+  // return unique norm values that are unchanged by encoding/decoding
+  private float nextNorm() {
+    float norm = lastNorm + normDelta;
+    do {
+      float norm1 = Similarity.getDefault().decodeNormValue(
+    		  Similarity.getDefault().encodeNormValue(norm));
+      if (norm1 > lastNorm) {
+        // System.out.println(norm1+" > "+lastNorm);
+        norm = norm1;
+        break;
+      }
+      norm += normDelta;
+    } while (true);
+    norms.add(numDocNorms, Float.valueOf(norm));
+    modifiedNorms.add(numDocNorms, Float.valueOf(norm));
+    // System.out.println("creating norm("+numDocNorms+"): "+norm);
+    numDocNorms++;
+    lastNorm = (norm > 10 ? 0 : norm); // there's a limit to how many distinct
+                                        // values can be stored in a single byte
+    return norm;
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestIndexReaderDelete.java b/lucene/backwards/src/test/org/apache/lucene/index/TestIndexReaderDelete.java
new file mode 100644
index 0000000..31d3575
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestIndexReaderDelete.java
@@ -0,0 +1,374 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.MockDirectoryWrapper;
+import org.apache.lucene.util.LuceneTestCase;
+
+import static org.apache.lucene.index.TestIndexReader.addDoc;
+import static org.apache.lucene.index.TestIndexReader.addDocumentWithFields;
+import static org.apache.lucene.index.TestIndexReader.assertTermDocsCount;
+import static org.apache.lucene.index.TestIndexReader.createDocument;
+
+public class TestIndexReaderDelete extends LuceneTestCase {
+  private void deleteReaderReaderConflict(boolean optimize) throws IOException {
+    Directory dir = newDirectory();
+
+    Term searchTerm1 = new Term("content", "aaa");
+    Term searchTerm2 = new Term("content", "bbb");
+    Term searchTerm3 = new Term("content", "ccc");
+
+    //  add 100 documents with term : aaa
+    //  add 100 documents with term : bbb
+    //  add 100 documents with term : ccc
+    IndexWriter writer  = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE));
+    for (int i = 0; i < 100; i++) {
+        addDoc(writer, searchTerm1.text());
+        addDoc(writer, searchTerm2.text());
+        addDoc(writer, searchTerm3.text());
+    }
+    if(optimize)
+      writer.optimize();
+    writer.close();
+
+    // OPEN TWO READERS
+    // Both readers get segment info as exists at this time
+    IndexReader reader1 = IndexReader.open(dir, false);
+    assertEquals("first opened", 100, reader1.docFreq(searchTerm1));
+    assertEquals("first opened", 100, reader1.docFreq(searchTerm2));
+    assertEquals("first opened", 100, reader1.docFreq(searchTerm3));
+    assertTermDocsCount("first opened", reader1, searchTerm1, 100);
+    assertTermDocsCount("first opened", reader1, searchTerm2, 100);
+    assertTermDocsCount("first opened", reader1, searchTerm3, 100);
+
+    IndexReader reader2 = IndexReader.open(dir, false);
+    assertEquals("first opened", 100, reader2.docFreq(searchTerm1));
+    assertEquals("first opened", 100, reader2.docFreq(searchTerm2));
+    assertEquals("first opened", 100, reader2.docFreq(searchTerm3));
+    assertTermDocsCount("first opened", reader2, searchTerm1, 100);
+    assertTermDocsCount("first opened", reader2, searchTerm2, 100);
+    assertTermDocsCount("first opened", reader2, searchTerm3, 100);
+
+    // DELETE DOCS FROM READER 2 and CLOSE IT
+    // delete documents containing term: aaa
+    // when the reader is closed, the segment info is updated and
+    // the first reader is now stale
+    reader2.deleteDocuments(searchTerm1);
+    assertEquals("after delete 1", 100, reader2.docFreq(searchTerm1));
+    assertEquals("after delete 1", 100, reader2.docFreq(searchTerm2));
+    assertEquals("after delete 1", 100, reader2.docFreq(searchTerm3));
+    assertTermDocsCount("after delete 1", reader2, searchTerm1, 0);
+    assertTermDocsCount("after delete 1", reader2, searchTerm2, 100);
+    assertTermDocsCount("after delete 1", reader2, searchTerm3, 100);
+    reader2.close();
+
+    // Make sure reader 1 is unchanged since it was open earlier
+    assertEquals("after delete 1", 100, reader1.docFreq(searchTerm1));
+    assertEquals("after delete 1", 100, reader1.docFreq(searchTerm2));
+    assertEquals("after delete 1", 100, reader1.docFreq(searchTerm3));
+    assertTermDocsCount("after delete 1", reader1, searchTerm1, 100);
+    assertTermDocsCount("after delete 1", reader1, searchTerm2, 100);
+    assertTermDocsCount("after delete 1", reader1, searchTerm3, 100);
+
+
+    // ATTEMPT TO DELETE FROM STALE READER
+    // delete documents containing term: bbb
+    try {
+        reader1.deleteDocuments(searchTerm2);
+        fail("Delete allowed from a stale index reader");
+    } catch (IOException e) {
+        /* success */
+    }
+
+    // RECREATE READER AND TRY AGAIN
+    reader1.close();
+    reader1 = IndexReader.open(dir, false);
+    assertEquals("reopened", 100, reader1.docFreq(searchTerm1));
+    assertEquals("reopened", 100, reader1.docFreq(searchTerm2));
+    assertEquals("reopened", 100, reader1.docFreq(searchTerm3));
+    assertTermDocsCount("reopened", reader1, searchTerm1, 0);
+    assertTermDocsCount("reopened", reader1, searchTerm2, 100);
+    assertTermDocsCount("reopened", reader1, searchTerm3, 100);
+
+    reader1.deleteDocuments(searchTerm2);
+    assertEquals("deleted 2", 100, reader1.docFreq(searchTerm1));
+    assertEquals("deleted 2", 100, reader1.docFreq(searchTerm2));
+    assertEquals("deleted 2", 100, reader1.docFreq(searchTerm3));
+    assertTermDocsCount("deleted 2", reader1, searchTerm1, 0);
+    assertTermDocsCount("deleted 2", reader1, searchTerm2, 0);
+    assertTermDocsCount("deleted 2", reader1, searchTerm3, 100);
+    reader1.close();
+
+    // Open another reader to confirm that everything is deleted
+    reader2 = IndexReader.open(dir, false);
+    assertTermDocsCount("reopened 2", reader2, searchTerm1, 0);
+    assertTermDocsCount("reopened 2", reader2, searchTerm2, 0);
+    assertTermDocsCount("reopened 2", reader2, searchTerm3, 100);
+    reader2.close();
+
+    dir.close();
+  }
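+
+  // Descriptive note: a reader becomes "stale" once another reader (or writer)
+  // commits changes to the same index after it was opened; attempting to delete
+  // through a stale reader fails, and the reader must be reopened before further
+  // deletes succeed, as exercised above.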
+
+  private void deleteReaderWriterConflict(boolean optimize) throws IOException {
+    //Directory dir = new RAMDirectory();
+    Directory dir = newDirectory();
+
+    Term searchTerm = new Term("content", "aaa");
+    Term searchTerm2 = new Term("content", "bbb");
+
+    //  add 100 documents with term : aaa
+    IndexWriter writer  = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE));
+    for (int i = 0; i < 100; i++) {
+        addDoc(writer, searchTerm.text());
+    }
+    writer.close();
+
+    // OPEN READER AT THIS POINT - this should fix the view of the
+    // index at the point of having 100 "aaa" documents and 0 "bbb"
+    IndexReader reader = IndexReader.open(dir, false);
+    assertEquals("first docFreq", 100, reader.docFreq(searchTerm));
+    assertEquals("first docFreq", 0, reader.docFreq(searchTerm2));
+    assertTermDocsCount("first reader", reader, searchTerm, 100);
+    assertTermDocsCount("first reader", reader, searchTerm2, 0);
+
+    // add 100 documents with term : bbb
+    writer  = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
+    for (int i = 0; i < 100; i++) {
+        addDoc(writer, searchTerm2.text());
+    }
+
+    // REQUEST OPTIMIZATION
+    // This causes a new segment to become current for all subsequent
+    // searchers. Because of this, deletions made via a previously open
+    // reader, which would be applied to that reader's segment, are lost
+    // for subsequent searchers/readers
+    if(optimize)
+      writer.optimize();
+    writer.close();
+
+    // The reader should not see the new data
+    assertEquals("first docFreq", 100, reader.docFreq(searchTerm));
+    assertEquals("first docFreq", 0, reader.docFreq(searchTerm2));
+    assertTermDocsCount("first reader", reader, searchTerm, 100);
+    assertTermDocsCount("first reader", reader, searchTerm2, 0);
+
+
+    // DELETE DOCUMENTS CONTAINING TERM: aaa
+    // NOTE: the reader was created when only "aaa" documents were in
+    int deleted = 0;
+    try {
+        deleted = reader.deleteDocuments(searchTerm);
+        fail("Delete allowed on an index reader with stale segment information");
+    } catch (StaleReaderException e) {
+        /* success */
+    }
+
+    // Re-open index reader and try again. This time it should see
+    // the new data.
+    reader.close();
+    reader = IndexReader.open(dir, false);
+    assertEquals("first docFreq", 100, reader.docFreq(searchTerm));
+    assertEquals("first docFreq", 100, reader.docFreq(searchTerm2));
+    assertTermDocsCount("first reader", reader, searchTerm, 100);
+    assertTermDocsCount("first reader", reader, searchTerm2, 100);
+
+    deleted = reader.deleteDocuments(searchTerm);
+    assertEquals("deleted count", 100, deleted);
+    assertEquals("deleted docFreq", 100, reader.docFreq(searchTerm));
+    assertEquals("deleted docFreq", 100, reader.docFreq(searchTerm2));
+    assertTermDocsCount("deleted termDocs", reader, searchTerm, 0);
+    assertTermDocsCount("deleted termDocs", reader, searchTerm2, 100);
+    reader.close();
+
+    // CREATE A NEW READER and re-test
+    reader = IndexReader.open(dir, false);
+    assertEquals("deleted docFreq", 100, reader.docFreq(searchTerm2));
+    assertTermDocsCount("deleted termDocs", reader, searchTerm, 0);
+    assertTermDocsCount("deleted termDocs", reader, searchTerm2, 100);
+    reader.close();
+    dir.close();
+  }
+
+  public void testBasicDelete() throws IOException {
+    Directory dir = newDirectory();
+
+    IndexWriter writer = null;
+    IndexReader reader = null;
+    Term searchTerm = new Term("content", "aaa");
+
+    //  add 100 documents with term : aaa
+    writer  = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    writer.setInfoStream(VERBOSE ? System.out : null);
+    for (int i = 0; i < 100; i++) {
+        addDoc(writer, searchTerm.text());
+    }
+    writer.close();
+
+    // OPEN READER AT THIS POINT - this should fix the view of the
+    // index at the point of having 100 "aaa" documents and 0 "bbb"
+    reader = IndexReader.open(dir, false);
+    assertEquals("first docFreq", 100, reader.docFreq(searchTerm));
+    assertTermDocsCount("first reader", reader, searchTerm, 100);
+    reader.close();
+
+    // DELETE DOCUMENTS CONTAINING TERM: aaa
+    int deleted = 0;
+    reader = IndexReader.open(dir, false);
+    deleted = reader.deleteDocuments(searchTerm);
+    assertEquals("deleted count", 100, deleted);
+    assertEquals("deleted docFreq", 100, reader.docFreq(searchTerm));
+    assertTermDocsCount("deleted termDocs", reader, searchTerm, 0);
+
+    // open a 2nd reader to make sure first reader can
+    // commit its changes (.del) while second reader
+    // is open:
+    IndexReader reader2 = IndexReader.open(dir, false);
+    reader.close();
+
+    // CREATE A NEW READER and re-test
+    reader = IndexReader.open(dir, false);
+    assertEquals("deleted docFreq", 0, reader.docFreq(searchTerm));
+    assertTermDocsCount("deleted termDocs", reader, searchTerm, 0);
+    reader.close();
+    reader2.close();
+    dir.close();
+  }
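+
+  // Descriptive note: deletions made through an IndexReader are buffered and are
+  // only written to the directory when the reader commits them (on flush/close),
+  // which is why the test above opens a second reader before closing the first
+  // one and then reopens to observe the committed deletions.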
+
+  public void testDeleteReaderReaderConflictUnoptimized() throws IOException {
+    deleteReaderReaderConflict(false);
+  }
+  
+  public void testDeleteReaderReaderConflictOptimized() throws IOException {
+    deleteReaderReaderConflict(true);
+  }
+  
+  public void testDeleteReaderWriterConflictUnoptimized() throws IOException {
+    deleteReaderWriterConflict(false);
+  }
+  
+  public void testDeleteReaderWriterConflictOptimized() throws IOException {
+    deleteReaderWriterConflict(true);
+  }
+  
+  public void testMultiReaderDeletes() throws Exception {
+    Directory dir = newDirectory();
+    RandomIndexWriter w= new RandomIndexWriter(random, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
+    Document doc = new Document();
+    doc.add(newField("f", "doctor", Field.Store.NO, Field.Index.NOT_ANALYZED));
+    w.addDocument(doc);
+    doc = new Document();
+    w.commit();
+    doc.add(newField("f", "who", Field.Store.NO, Field.Index.NOT_ANALYZED));
+    w.addDocument(doc);
+    IndexReader r = new SlowMultiReaderWrapper(w.getReader());
+    w.close();
+
+    assertFalse(r.hasDeletions());
+    r.close();
+
+    r = new SlowMultiReaderWrapper(IndexReader.open(dir, false));
+
+    assertFalse(r.hasDeletions());
+    assertEquals(1, r.deleteDocuments(new Term("f", "doctor")));
+    assertTrue(r.hasDeletions());
+    assertTrue(r.isDeleted(0));
+    assertEquals(1, r.deleteDocuments(new Term("f", "who")));
+    assertTrue(r.isDeleted(1));
+    r.close();
+    dir.close();
+  }
+  
+  public void testUndeleteAll() throws IOException {
+    Directory dir = newDirectory();
+    IndexWriter writer  = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    addDocumentWithFields(writer);
+    addDocumentWithFields(writer);
+    writer.close();
+    IndexReader reader = IndexReader.open(dir, false);
+    reader.deleteDocument(0);
+    reader.deleteDocument(1);
+    reader.undeleteAll();
+    reader.close();
+    reader = IndexReader.open(dir, false);
+    assertEquals(2, reader.numDocs());  // nothing has really been deleted thanks to undeleteAll()
+    reader.close();
+    dir.close();
+  }
+
+  public void testUndeleteAllAfterClose() throws IOException {
+    Directory dir = newDirectory();
+    IndexWriter writer  = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    addDocumentWithFields(writer);
+    addDocumentWithFields(writer);
+    writer.close();
+    IndexReader reader = IndexReader.open(dir, false);
+    reader.deleteDocument(0);
+    reader.close();
+    reader = IndexReader.open(dir, false);
+    reader.undeleteAll();
+    assertEquals(2, reader.numDocs());  // nothing has really been deleted thanks to undeleteAll()
+    reader.close();
+    dir.close();
+  }
+
+  public void testUndeleteAllAfterCloseThenReopen() throws IOException {
+    Directory dir = newDirectory();
+    IndexWriter writer  = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    addDocumentWithFields(writer);
+    addDocumentWithFields(writer);
+    writer.close();
+    IndexReader reader = IndexReader.open(dir, false);
+    reader.deleteDocument(0);
+    reader.close();
+    reader = IndexReader.open(dir, false);
+    reader.undeleteAll();
+    reader.close();
+    reader = IndexReader.open(dir, false);
+    assertEquals(2, reader.numDocs());  // nothing has really been deleted thanks to undeleteAll()
+    reader.close();
+    dir.close();
+  }
+  
+  // LUCENE-1647
+  public void testIndexReaderUnDeleteAll() throws Exception {
+    MockDirectoryWrapper dir = newDirectory();
+    dir.setPreventDoubleWrite(false);
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
+        TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    writer.addDocument(createDocument("a"));
+    writer.addDocument(createDocument("b"));
+    writer.addDocument(createDocument("c"));
+    writer.close();
+    IndexReader reader = IndexReader.open(dir, false);
+    reader.deleteDocuments(new Term("id", "a"));
+    reader.flush();
+    reader.deleteDocuments(new Term("id", "b"));
+    reader.undeleteAll();
+    reader.deleteDocuments(new Term("id", "b"));
+    reader.close();
+    IndexReader.open(dir,true).close();
+    dir.close();
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestIndexReaderOnDiskFull.java b/lucene/backwards/src/test/org/apache/lucene/index/TestIndexReaderOnDiskFull.java
new file mode 100644
index 0000000..d17457f
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestIndexReaderOnDiskFull.java
@@ -0,0 +1,229 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.search.DefaultSimilarity;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.Similarity;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.store.MockDirectoryWrapper;
+import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util._TestUtil;
+
+public class TestIndexReaderOnDiskFull extends LuceneTestCase {
+  /**
+   * Make sure that if the reader hits disk full while trying to commit,
+   * the reader remains consistent and usable.
+   */
+  public void testDiskFull() throws IOException {
+
+    Term searchTerm = new Term("content", "aaa");
+    int START_COUNT = 157;
+    int END_COUNT = 144;
+    
+    // First build up a starting index:
+    MockDirectoryWrapper startDir = newDirectory();
+    IndexWriter writer = new IndexWriter(startDir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    if (VERBOSE) {
+      System.out.println("TEST: create initial index");
+      writer.setInfoStream(System.out);
+    }
+    for(int i=0;i<157;i++) {
+      Document d = new Document();
+      d.add(newField("id", Integer.toString(i), Field.Store.YES, Field.Index.NOT_ANALYZED));
+      d.add(newField("content", "aaa " + i, Field.Store.NO, Field.Index.ANALYZED));
+      writer.addDocument(d);
+      if (0==i%10)
+        writer.commit();
+    }
+    writer.close();
+
+    {
+      IndexReader r = IndexReader.open(startDir);
+      IndexSearcher searcher = newSearcher(r);
+      ScoreDoc[] hits = null;
+      try {
+        hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
+      } catch (IOException e) {
+        e.printStackTrace();
+        fail("exception when init searching: " + e);
+      }
+      searcher.close();
+      r.close();
+    }
+
+    long diskUsage = startDir.getRecomputedActualSizeInBytes();
+    long diskFree = diskUsage+_TestUtil.nextInt(random, 50, 200);
+
+    IOException err = null;
+
+    boolean done = false;
+    boolean gotExc = false;
+
+    // Iterate w/ ever increasing free disk space:
+    while(!done) {
+      MockDirectoryWrapper dir = new MockDirectoryWrapper(random, new RAMDirectory(startDir));
+
+      // If IndexReader hits disk full, it can write to
+      // the same files again.
+      dir.setPreventDoubleWrite(false);
+
+      IndexReader reader = IndexReader.open(dir, false);
+
+      // For each disk size, first try to commit against
+      // dir that will hit random IOExceptions & disk
+      // full; after, give it infinite disk space & turn
+      // off random IOExceptions & retry w/ same reader:
+      boolean success = false;
+
+      for(int x=0;x<2;x++) {
+
+        double rate = 0.05;
+        double diskRatio = ((double) diskFree)/diskUsage;
+        long thisDiskFree;
+        String testName;
+
+        if (0 == x) {
+          thisDiskFree = diskFree;
+          if (diskRatio >= 2.0) {
+            rate /= 2;
+          }
+          if (diskRatio >= 4.0) {
+            rate /= 2;
+          }
+          if (diskRatio >= 6.0) {
+            rate = 0.0;
+          }
+          if (VERBOSE) {
+            System.out.println("\ncycle: " + diskFree + " bytes");
+          }
+          testName = "disk full during reader.close() @ " + thisDiskFree + " bytes";
+        } else {
+          thisDiskFree = 0;
+          rate = 0.0;
+          if (VERBOSE) {
+            System.out.println("\ncycle: same writer: unlimited disk space");
+          }
+          testName = "reader re-use after disk full";
+        }
+
+        dir.setMaxSizeInBytes(thisDiskFree);
+        dir.setRandomIOExceptionRate(rate);
+        Similarity sim = new DefaultSimilarity();
+        try {
+          if (0 == x) {
+            int docId = 12;
+            for(int i=0;i<13;i++) {
+              reader.deleteDocument(docId);
+              reader.setNorm(docId, "content", sim.encodeNormValue(2.0f));
+              docId += 12;
+            }
+          }
+          reader.close();
+          success = true;
+          if (0 == x) {
+            done = true;
+          }
+        } catch (IOException e) {
+          if (VERBOSE) {
+            System.out.println("  hit IOException: " + e);
+            e.printStackTrace(System.out);
+          }
+          err = e;
+          gotExc = true;
+          if (1 == x) {
+            e.printStackTrace();
+            fail(testName + " hit IOException after disk space was freed up");
+          }
+        }
+
+        // Finally, verify index is not corrupt, and, if
+        // we succeeded, we see all docs changed, and if
+        // we failed, we see either all docs or no docs
+        // changed (transactional semantics):
+        IndexReader newReader = null;
+        try {
+          newReader = IndexReader.open(dir, false);
+        } catch (IOException e) {
+          e.printStackTrace();
+          fail(testName + ":exception when creating IndexReader after disk full during close: " + e);
+        }
+        /*
+        int result = newReader.docFreq(searchTerm);
+        if (success) {
+          if (result != END_COUNT) {
+            fail(testName + ": method did not throw exception but docFreq('aaa') is " + result + " instead of expected " + END_COUNT);
+          }
+        } else {
+          // On hitting exception we still may have added
+          // all docs:
+          if (result != START_COUNT && result != END_COUNT) {
+            err.printStackTrace();
+            fail(testName + ": method did throw exception but docFreq('aaa') is " + result + " instead of expected " + START_COUNT + " or " + END_COUNT);
+          }
+        }
+        */
+
+        IndexSearcher searcher = newSearcher(newReader);
+        ScoreDoc[] hits = null;
+        try {
+          hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
+        } catch (IOException e) {
+          e.printStackTrace();
+          fail(testName + ": exception when searching: " + e);
+        }
+        int result2 = hits.length;
+        if (success) {
+          if (result2 != END_COUNT) {
+            fail(testName + ": method did not throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + END_COUNT);
+          }
+        } else {
+          // On hitting exception we still may have added
+          // all docs:
+          if (result2 != START_COUNT && result2 != END_COUNT) {
+            err.printStackTrace();
+            fail(testName + ": method did throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + START_COUNT);
+          }
+        }
+
+        searcher.close();
+        newReader.close();
+
+        if (result2 == END_COUNT) {
+          if (!gotExc)
+            fail("never hit disk full");
+          break;
+        }
+      }
+
+      dir.close();
+
+      // Try again with more bytes of free space:
+      diskFree += TEST_NIGHTLY ? _TestUtil.nextInt(random, 5, 20) : _TestUtil.nextInt(random, 50, 200);
+    }
+
+    startDir.close();
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestIndexReaderReopen.java b/lucene/backwards/src/test/org/apache/lucene/index/TestIndexReaderReopen.java
new file mode 100644
index 0000000..1cd5142
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestIndexReaderReopen.java
@@ -0,0 +1,1273 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+import java.util.Set;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field.Index;
+import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.search.FieldCache;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.store.AlreadyClosedException;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.BitVector;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util._TestUtil;
+
+public class TestIndexReaderReopen extends LuceneTestCase {
+  
+  public void testReopen() throws Exception {
+    final Directory dir1 = newDirectory();
+    
+    createIndex(random, dir1, false);
+    performDefaultTests(new TestReopen() {
+
+      @Override
+      protected void modifyIndex(int i) throws IOException {
+        TestIndexReaderReopen.modifyIndex(i, dir1);
+      }
+
+      @Override
+      protected IndexReader openReader() throws IOException {
+        return IndexReader.open(dir1, false);
+      }
+      
+    });
+    dir1.close();
+    
+    final Directory dir2 = newDirectory();
+    
+    createIndex(random, dir2, true);
+    performDefaultTests(new TestReopen() {
+
+      @Override
+      protected void modifyIndex(int i) throws IOException {
+        TestIndexReaderReopen.modifyIndex(i, dir2);
+      }
+
+      @Override
+      protected IndexReader openReader() throws IOException {
+        return IndexReader.open(dir2, false);
+      }
+      
+    });
+    dir2.close();
+  }
+  
+  public void testParallelReaderReopen() throws Exception {
+    final Directory dir1 = newDirectory();
+    createIndex(random, dir1, true);
+    final Directory dir2 = newDirectory();
+    createIndex(random, dir2, true);
+    
+    performDefaultTests(new TestReopen() {
+
+      @Override
+      protected void modifyIndex(int i) throws IOException {
+        TestIndexReaderReopen.modifyIndex(i, dir1);
+        TestIndexReaderReopen.modifyIndex(i, dir2);
+      }
+
+      @Override
+      protected IndexReader openReader() throws IOException {
+        ParallelReader pr = new ParallelReader();
+        pr.add(IndexReader.open(dir1, false));
+        pr.add(IndexReader.open(dir2, false));
+        return pr;
+      }
+      
+    });
+    dir1.close();
+    dir2.close();
+    
+    final Directory dir3 = newDirectory();
+    createIndex(random, dir3, true);
+    final Directory dir4 = newDirectory();
+    createIndex(random, dir4, true);
+
+    performTestsWithExceptionInReopen(new TestReopen() {
+
+      @Override
+      protected void modifyIndex(int i) throws IOException {
+        TestIndexReaderReopen.modifyIndex(i, dir3);
+        TestIndexReaderReopen.modifyIndex(i, dir4);
+      }
+
+      @Override
+      protected IndexReader openReader() throws IOException {
+        ParallelReader pr = new ParallelReader();
+        pr.add(IndexReader.open(dir3, false));
+        pr.add(IndexReader.open(dir4, false));
+        // Does not implement reopen, so
+        // hits exception:
+        pr.add(new FilterIndexReader(IndexReader.open(dir3, false)));
+        return pr;
+      }
+      
+    });
+    dir3.close();
+    dir4.close();
+  }
+
+  // LUCENE-1228: IndexWriter.commit() does not update the index version.
+  // Populate an index in iterations; at the end of every iteration, commit
+  // the index and reopen/recreate the reader. In each iteration verify the
+  // work of the previous iteration. Try this once with reopen and once with
+  // recreate, on both RAMDir and FSDir.
+  public void testCommitReopen () throws IOException {
+    Directory dir = newDirectory();
+    doTestReopenWithCommit(random, dir, true);
+    dir.close();
+  }
+  public void testCommitRecreate () throws IOException {
+    Directory dir = newDirectory();
+    doTestReopenWithCommit(random, dir, false);
+    dir.close();
+  }
+
+  private void doTestReopenWithCommit (Random random, Directory dir, boolean withReopen) throws IOException {
+    IndexWriter iwriter = new IndexWriter(dir, newIndexWriterConfig(
+        TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(
+                                                              OpenMode.CREATE).setMergeScheduler(new SerialMergeScheduler()).setMergePolicy(newLogMergePolicy()));
+    iwriter.commit();
+    IndexReader reader = IndexReader.open(dir, false);
+    try {
+      int M = 3;
+      for (int i=0; i<4; i++) {
+        for (int j=0; j<M; j++) {
+          Document doc = new Document();
+          doc.add(newField("id", i+"_"+j, Store.YES, Index.NOT_ANALYZED));
+          doc.add(newField("id2", i+"_"+j, Store.YES, Index.NOT_ANALYZED_NO_NORMS));
+          doc.add(newField("id3", i+"_"+j, Store.YES, Index.NO));
+          iwriter.addDocument(doc);
+          if (i>0) {
+            int k = i-1;
+            int n = j + k*M;
+            Document prevIterationDoc = reader.document(n);
+            assertNotNull(prevIterationDoc);
+            String id = prevIterationDoc.get("id");
+            assertEquals(k+"_"+j, id);
+          }
+        }
+        iwriter.commit();
+        if (withReopen) {
+          // reopen
+          IndexReader r2 = reader.reopen();
+          if (reader != r2) {
+            reader.close();
+            reader = r2;
+          }
+        } else {
+          // recreate
+          reader.close();
+          reader = IndexReader.open(dir, false);
+        }
+      }
+    } finally {
+      iwriter.close();
+      reader.close();
+    }
+  }
+  
+  public void testMultiReaderReopen() throws Exception {
+    final Directory dir1 = newDirectory();
+    createIndex(random, dir1, true);
+
+    final Directory dir2 = newDirectory();
+    createIndex(random, dir2, true);
+
+    performDefaultTests(new TestReopen() {
+
+      @Override
+      protected void modifyIndex(int i) throws IOException {
+        TestIndexReaderReopen.modifyIndex(i, dir1);
+        TestIndexReaderReopen.modifyIndex(i, dir2);
+      }
+
+      @Override
+      protected IndexReader openReader() throws IOException {
+        return new MultiReader(new IndexReader[] 
+                        {IndexReader.open(dir1, false), 
+                         IndexReader.open(dir2, false)});
+      }
+      
+    });
+
+    dir1.close();
+    dir2.close();
+    
+    final Directory dir3 = newDirectory();
+    createIndex(random, dir3, true);
+
+    final Directory dir4 = newDirectory();
+    createIndex(random, dir4, true);
+
+    performTestsWithExceptionInReopen(new TestReopen() {
+
+      @Override
+      protected void modifyIndex(int i) throws IOException {
+        TestIndexReaderReopen.modifyIndex(i, dir3);
+        TestIndexReaderReopen.modifyIndex(i, dir4);
+      }
+
+      @Override
+      protected IndexReader openReader() throws IOException {
+        return new MultiReader(new IndexReader[] 
+                        {IndexReader.open(dir3, false), 
+                         IndexReader.open(dir4, false),
+                         // Does not implement reopen, so
+                         // hits exception:
+                         new FilterIndexReader(IndexReader.open(dir3, false))});
+      }
+      
+    });
+    dir3.close();
+    dir4.close();
+  }
+
+  public void testMixedReaders() throws Exception {
+    final Directory dir1 = newDirectory();
+    createIndex(random, dir1, true);
+    final Directory dir2 = newDirectory();
+    createIndex(random, dir2, true);
+    final Directory dir3 = newDirectory();
+    createIndex(random, dir3, false);
+    final Directory dir4 = newDirectory();
+    createIndex(random, dir4, true);
+    final Directory dir5 = newDirectory();
+    createIndex(random, dir5, false);
+    
+    performDefaultTests(new TestReopen() {
+
+      @Override
+      protected void modifyIndex(int i) throws IOException {
+        // only change norms in this index to maintain the same number of docs for each of ParallelReader's subreaders
+        if (i == 1) TestIndexReaderReopen.modifyIndex(i, dir1);  
+        
+        TestIndexReaderReopen.modifyIndex(i, dir4);
+        TestIndexReaderReopen.modifyIndex(i, dir5);
+      }
+
+      @Override
+      protected IndexReader openReader() throws IOException {
+        ParallelReader pr = new ParallelReader();
+        pr.add(IndexReader.open(dir1, false));
+        pr.add(IndexReader.open(dir2, false));
+        MultiReader mr = new MultiReader(new IndexReader[] {
+            IndexReader.open(dir3, false), IndexReader.open(dir4, false)});
+        return new MultiReader(new IndexReader[] {
+           pr, mr, IndexReader.open(dir5, false)});
+      }
+    });
+    dir1.close();
+    dir2.close();
+    dir3.close();
+    dir4.close();
+    dir5.close();
+  }  
+  
+  private void performDefaultTests(TestReopen test) throws Exception {
+
+    IndexReader index1 = test.openReader();
+    IndexReader index2 = test.openReader();
+        
+    TestIndexReader.assertIndexEquals(index1, index2);
+
+    // verify that reopen() does not return a new reader instance
+    // in case the index has no changes
+    ReaderCouple couple = refreshReader(index2, false);
+    assertTrue(couple.refreshedReader == index2);
+    
+    couple = refreshReader(index2, test, 0, true);
+    index1.close();
+    index1 = couple.newReader;
+
+    IndexReader index2_refreshed = couple.refreshedReader;
+    index2.close();
+    
+    // test if refreshed reader and newly opened reader return equal results
+    TestIndexReader.assertIndexEquals(index1, index2_refreshed);
+
+    index2_refreshed.close();
+    assertReaderClosed(index2, true, true);
+    assertReaderClosed(index2_refreshed, true, true);
+
+    index2 = test.openReader();
+    
+    for (int i = 1; i < 4; i++) {
+      
+      index1.close();
+      couple = refreshReader(index2, test, i, true);
+      // refresh IndexReader
+      index2.close();
+      
+      index2 = couple.refreshedReader;
+      index1 = couple.newReader;
+      TestIndexReader.assertIndexEquals(index1, index2);
+    }
+    
+    index1.close();
+    index2.close();
+    assertReaderClosed(index1, true, true);
+    assertReaderClosed(index2, true, true);
+  }
+  
+  public void testReferenceCounting() throws IOException {
+    for (int mode = 0; mode < 4; mode++) {
+      Directory dir1 = newDirectory();
+      createIndex(random, dir1, true);
+     
+      IndexReader reader0 = IndexReader.open(dir1, false);
+      assertRefCountEquals(1, reader0);
+
+      assertTrue(reader0 instanceof DirectoryReader);
+      IndexReader[] subReaders0 = reader0.getSequentialSubReaders();
+      for (int i = 0; i < subReaders0.length; i++) {
+        assertRefCountEquals(1, subReaders0[i]);
+      }
+      
+      // delete first document, so that only one of the subReaders has to be re-opened
+      IndexReader modifier = IndexReader.open(dir1, false);
+      modifier.deleteDocument(0);
+      modifier.close();
+      
+      IndexReader reader1 = refreshReader(reader0, true).refreshedReader;
+      assertTrue(reader1 instanceof DirectoryReader);
+      IndexReader[] subReaders1 = reader1.getSequentialSubReaders();
+      assertEquals(subReaders0.length, subReaders1.length);
+      
+      for (int i = 0; i < subReaders0.length; i++) {
+        if (subReaders0[i] != subReaders1[i]) {
+          assertRefCountEquals(1, subReaders0[i]);
+          assertRefCountEquals(1, subReaders1[i]);
+        } else {
+          assertRefCountEquals(2, subReaders0[i]);
+        }
+      }
+
+      // delete another document, so that only one of the subReaders has to be re-opened
+      modifier = IndexReader.open(dir1, false);
+      modifier.deleteDocument(1);
+      modifier.close();
+
+      IndexReader reader2 = refreshReader(reader1, true).refreshedReader;
+      assertTrue(reader2 instanceof DirectoryReader);
+      IndexReader[] subReaders2 = reader2.getSequentialSubReaders();
+      assertEquals(subReaders1.length, subReaders2.length);
+      
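+      // Each sub reader's refCount reflects how many of the still-open
+      // top-level readers (reader0, reader1, reader2) currently share it.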
+      for (int i = 0; i < subReaders2.length; i++) {
+        if (subReaders2[i] == subReaders1[i]) {
+          if (subReaders1[i] == subReaders0[i]) {
+            assertRefCountEquals(3, subReaders2[i]);
+          } else {
+            assertRefCountEquals(2, subReaders2[i]);
+          }
+        } else {
+          assertRefCountEquals(1, subReaders2[i]);
+          if (subReaders0[i] == subReaders1[i]) {
+            assertRefCountEquals(2, subReaders2[i]);
+            assertRefCountEquals(2, subReaders0[i]);
+          } else {
+            assertRefCountEquals(1, subReaders0[i]);
+            assertRefCountEquals(1, subReaders1[i]);
+          }
+        }
+      }
+      
+      IndexReader reader3 = refreshReader(reader0, true).refreshedReader;
+      assertTrue(reader3 instanceof DirectoryReader);
+      IndexReader[] subReaders3 = reader3.getSequentialSubReaders();
+      assertEquals(subReaders3.length, subReaders0.length);
+      
+      // try some permutations
+      switch (mode) {
+      case 0:
+        reader0.close();
+        reader1.close();
+        reader2.close();
+        reader3.close();
+        break;
+      case 1:
+        reader3.close();
+        reader2.close();
+        reader1.close();
+        reader0.close();
+        break;
+      case 2:
+        reader2.close();
+        reader3.close();
+        reader0.close();
+        reader1.close();
+        break;
+      case 3:
+        reader1.close();
+        reader3.close();
+        reader2.close();
+        reader0.close();
+        break;
+      }      
+      
+      assertReaderClosed(reader0, true, true);
+      assertReaderClosed(reader1, true, true);
+      assertReaderClosed(reader2, true, true);
+      assertReaderClosed(reader3, true, true);
+
+      dir1.close();
+    }
+  }
+
+
+  public void testReferenceCountingMultiReader() throws IOException {
+    for (int mode = 0; mode <=1; mode++) {
+      Directory dir1 = newDirectory();
+      createIndex(random, dir1, false);
+      Directory dir2 = newDirectory();
+      createIndex(random, dir2, true);
+      
+      IndexReader reader1 = IndexReader.open(dir1, false);
+      assertRefCountEquals(1, reader1);
+
+      IndexReader initReader2 = IndexReader.open(dir2, false);
+      IndexReader multiReader1 = new MultiReader(new IndexReader[] {reader1, initReader2}, (mode == 0));
+      modifyIndex(0, dir2);
+      assertRefCountEquals(1 + mode, reader1);
+      
+      IndexReader multiReader2 = multiReader1.reopen();
+      // index1 hasn't changed, so multiReader2 should now share reader1 with multiReader1
+      assertRefCountEquals(2 + mode, reader1);
+      
+      modifyIndex(0, dir1);
+      IndexReader reader2 = reader1.reopen();
+      assertRefCountEquals(2 + mode, reader1);
+
+      if (mode == 1) {
+        initReader2.close();
+      }
+      
+      modifyIndex(1, dir1);
+      IndexReader reader3 = reader2.reopen();
+      assertRefCountEquals(2 + mode, reader1);
+      assertRefCountEquals(1, reader2);
+      
+      multiReader1.close();
+      assertRefCountEquals(1 + mode, reader1);
+      
+      multiReader1.close();
+      assertRefCountEquals(1 + mode, reader1);
+
+      if (mode == 1) {
+        initReader2.close();
+      }
+      
+      reader1.close();
+      assertRefCountEquals(1, reader1);
+      
+      multiReader2.close();
+      assertRefCountEquals(0, reader1);
+      
+      multiReader2.close();
+      assertRefCountEquals(0, reader1);
+      
+      reader3.close();
+      assertRefCountEquals(0, reader1);
+      assertReaderClosed(reader1, true, false);
+      
+      reader2.close();
+      assertRefCountEquals(0, reader1);
+      assertReaderClosed(reader1, true, false);
+      
+      reader2.close();
+      assertRefCountEquals(0, reader1);
+      
+      reader3.close();
+      assertRefCountEquals(0, reader1);
+      assertReaderClosed(reader1, true, true);
+      dir1.close();
+      dir2.close();
+    }
+
+  }
+
+  public void testReferenceCountingParallelReader() throws IOException {
+    for (int mode = 0; mode <=1; mode++) {
+      Directory dir1 = newDirectory();
+      createIndex(random, dir1, false);
+      Directory dir2 = newDirectory();
+      createIndex(random, dir2, true);
+      
+      IndexReader reader1 = IndexReader.open(dir1, false);
+      assertRefCountEquals(1, reader1);
+      
+      ParallelReader parallelReader1 = new ParallelReader(mode == 0);
+      parallelReader1.add(reader1);
+      IndexReader initReader2 = IndexReader.open(dir2, false);
+      parallelReader1.add(initReader2);
+      modifyIndex(1, dir2);
+      assertRefCountEquals(1 + mode, reader1);
+      
+      IndexReader parallelReader2 = parallelReader1.reopen();
+      // index1 hasn't changed, so parallelReader2 should now share reader1 with parallelReader1
+      assertRefCountEquals(2 + mode, reader1);
+      
+      modifyIndex(0, dir1);
+      modifyIndex(0, dir2);
+      IndexReader reader2 = reader1.reopen();
+      assertRefCountEquals(2 + mode, reader1);
+
+      if (mode == 1) {
+        initReader2.close();
+      }
+      
+      modifyIndex(4, dir1);
+      IndexReader reader3 = reader2.reopen();
+      assertRefCountEquals(2 + mode, reader1);
+      assertRefCountEquals(1, reader2);
+      
+      parallelReader1.close();
+      assertRefCountEquals(1 + mode, reader1);
+      
+      parallelReader1.close();
+      assertRefCountEquals(1 + mode, reader1);
+
+      if (mode == 1) {
+        initReader2.close();
+      }
+      
+      reader1.close();
+      assertRefCountEquals(1, reader1);
+      
+      parallelReader2.close();
+      assertRefCountEquals(0, reader1);
+      
+      parallelReader2.close();
+      assertRefCountEquals(0, reader1);
+      
+      reader3.close();
+      assertRefCountEquals(0, reader1);
+      assertReaderClosed(reader1, true, false);
+      
+      reader2.close();
+      assertRefCountEquals(0, reader1);
+      assertReaderClosed(reader1, true, false);
+      
+      reader2.close();
+      assertRefCountEquals(0, reader1);
+      
+      reader3.close();
+      assertRefCountEquals(0, reader1);
+      assertReaderClosed(reader1, true, true);
+
+      dir1.close();
+      dir2.close();
+    }
+
+  }
+  
+  public void testNormsRefCounting() throws IOException {
+    Directory dir1 = newDirectory();
+    createIndex(random, dir1, false);
+    
+    IndexReader reader1 = IndexReader.open(dir1, false);
+    SegmentReader segmentReader1 = SegmentReader.getOnlySegmentReader(reader1);
+    IndexReader modifier = IndexReader.open(dir1, false);
+    modifier.deleteDocument(0);
+    modifier.close();
+    
+    IndexReader reader2 = reader1.reopen();
+    modifier = IndexReader.open(dir1, false);
+    modifier.setNorm(1, "field1", 50);
+    modifier.setNorm(1, "field2", 50);
+    modifier.close();
+    
+    IndexReader reader3 = reader2.reopen();
+    SegmentReader segmentReader3 = SegmentReader.getOnlySegmentReader(reader3);
+    modifier = IndexReader.open(dir1, false);
+    modifier.deleteDocument(2);
+    modifier.close();
+
+    IndexReader reader4 = reader3.reopen();
+    modifier = IndexReader.open(dir1, false);
+    modifier.deleteDocument(3);
+    modifier.close();
+
+    IndexReader reader5 = reader3.reopen();
+    
+    // Now reader2-reader5 reference reader1. reader1 and reader2
+    // share the same norms. reader3, reader4, reader5 also share norms.
+    assertRefCountEquals(1, reader1);
+    assertFalse(segmentReader1.normsClosed());
+
+    reader1.close();
+
+    assertRefCountEquals(0, reader1);
+    assertFalse(segmentReader1.normsClosed());
+
+    reader2.close();
+    assertRefCountEquals(0, reader1);
+
+    // now the norms for field1 and field2 should be closed
+    assertTrue(segmentReader1.normsClosed("field1"));
+    assertTrue(segmentReader1.normsClosed("field2"));
+
+    // but the norms for field3 and field4 should still be open
+    assertFalse(segmentReader1.normsClosed("field3"));
+    assertFalse(segmentReader1.normsClosed("field4"));
+    
+    reader3.close();
+    assertRefCountEquals(0, reader1);
+    assertFalse(segmentReader3.normsClosed());
+    reader5.close();
+    assertRefCountEquals(0, reader1);
+    assertFalse(segmentReader3.normsClosed());
+    reader4.close();
+    assertRefCountEquals(0, reader1);
+    
+    // and now all norms that reader1 used should be closed
+    assertTrue(segmentReader1.normsClosed());
+    
+    // now that reader3, reader4 and reader5 are closed,
+    // the norms that those three readers shared should be
+    // closed as well
+    assertTrue(segmentReader3.normsClosed());
+
+    dir1.close();
+  }
+  
+  private void performTestsWithExceptionInReopen(TestReopen test) throws Exception {
+    IndexReader index1 = test.openReader();
+    IndexReader index2 = test.openReader();
+
+    TestIndexReader.assertIndexEquals(index1, index2);
+    
+    try {
+      refreshReader(index1, test, 0, true);
+      fail("Expected exception not thrown.");
+    } catch (Exception e) {
+      // expected exception
+    }
+    
+    // index2 should still be usable and unaffected by the failed reopen() call
+    TestIndexReader.assertIndexEquals(index1, index2);
+
+    index1.close();
+    index2.close();
+  }
+  
+  public void testThreadSafety() throws Exception {
+    final Directory dir = newDirectory();
+    // NOTE: this also controls the number of threads!
+    final int n = _TestUtil.nextInt(random, 20, 40);
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
+        TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    for (int i = 0; i < n; i++) {
+      writer.addDocument(createDocument(i, 3));
+    }
+    writer.optimize();
+    writer.close();
+
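+    // Each modification round either changes norms, deletes a document, or
+    // adds one, so reopened readers observe several kinds of index changes.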
+    final TestReopen test = new TestReopen() {      
+      @Override
+      protected void modifyIndex(int i) throws IOException {
+        if (i % 3 == 0) {
+          IndexReader modifier = IndexReader.open(dir, false);
+          modifier.setNorm(i, "field1", 50);
+          modifier.close();
+        } else if (i % 3 == 1) {
+          IndexReader modifier = IndexReader.open(dir, false);
+          modifier.deleteDocument(i % modifier.maxDoc());
+          modifier.close();
+        } else {
+          IndexWriter modifier = new IndexWriter(dir, new IndexWriterConfig(
+              TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+          modifier.addDocument(createDocument(n + i, 6));
+          modifier.close();
+        }
+      }
+
+      @Override
+      protected IndexReader openReader() throws IOException {
+        return IndexReader.open(dir, false);
+      }      
+    };
+    
+    final List<ReaderCouple> readers = Collections.synchronizedList(new ArrayList<ReaderCouple>());
+    IndexReader firstReader = IndexReader.open(dir, false);
+    IndexReader reader = firstReader;
+    final Random rnd = random;
+    
+    ReaderThread[] threads = new ReaderThread[n];
+    final Set<IndexReader> readersToClose = Collections.synchronizedSet(new HashSet<IndexReader>());
+    
+    for (int i = 0; i < n; i++) {
+      if (i % 2 == 0) {
+        IndexReader refreshed = reader.reopen();
+        if (refreshed != reader) {
+          readersToClose.add(reader);
+        }
+        reader = refreshed;
+      }
+      final IndexReader r = reader;
+      
+      final int index = i;    
+      
+      ReaderThreadTask task;
+      
+      if (i < 4 || (i >=10 && i < 14) || i > 18) {
+        task = new ReaderThreadTask() {
+          
+          @Override
+          public void run() throws Exception {
+            while (!stopped) {
+              if (index % 2 == 0) {
+                // refresh reader synchronized
+                ReaderCouple c = (refreshReader(r, test, index, true));
+                readersToClose.add(c.newReader);
+                readersToClose.add(c.refreshedReader);
+                readers.add(c);
+                // prevent too many readers
+                break;
+              } else {
+                // not synchronized
+                IndexReader refreshed = r.reopen();
+                
+                IndexSearcher searcher = newSearcher(refreshed);
+                ScoreDoc[] hits = searcher.search(
+                    new TermQuery(new Term("field1", "a" + rnd.nextInt(refreshed.maxDoc()))),
+                    null, 1000).scoreDocs;
+                if (hits.length > 0) {
+                  searcher.doc(hits[0].doc);
+                }
+                searcher.close();
+                if (refreshed != r) {
+                  refreshed.close();
+                }
+              }
+              synchronized(this) {
+                wait(_TestUtil.nextInt(random, 1, 100));
+              }
+            }
+          }
+          
+        };
+      } else {
+        task = new ReaderThreadTask() {
+          @Override
+          public void run() throws Exception {
+            while (!stopped) {
+              int numReaders = readers.size();
+              if (numReaders > 0) {
+                ReaderCouple c =  readers.get(rnd.nextInt(numReaders));
+                TestIndexReader.assertIndexEquals(c.newReader, c.refreshedReader);
+              }
+              
+              synchronized(this) {
+                wait(_TestUtil.nextInt(random, 1, 100));
+              }
+            }
+          }
+        };
+      }
+      
+      threads[i] = new ReaderThread(task);
+      threads[i].start();
+    }
+    
+    synchronized(this) {
+      wait(1000);
+    }
+    
+    for (int i = 0; i < n; i++) {
+      if (threads[i] != null) {
+        threads[i].stopThread();
+      }
+    }
+    
+    for (int i = 0; i < n; i++) {
+      if (threads[i] != null) {
+        threads[i].join();
+        if (threads[i].error != null) {
+          String msg = "Error occurred in thread " + threads[i].getName() + ":\n" + threads[i].error.getMessage();
+          fail(msg);
+        }
+      }
+      
+    }
+    
+    for (final IndexReader readerToClose : readersToClose) {
+      readerToClose.close();
+    }
+    
+    firstReader.close();
+    reader.close();
+    
+    for (final IndexReader readerToClose : readersToClose) {
+      assertReaderClosed(readerToClose, true, true);
+    }
+
+    assertReaderClosed(reader, true, true);
+    assertReaderClosed(firstReader, true, true);
+
+    dir.close();
+  }
+  
+  private static class ReaderCouple {
+    ReaderCouple(IndexReader r1, IndexReader r2) {
+      newReader = r1;
+      refreshedReader = r2;
+    }
+    
+    IndexReader newReader;
+    IndexReader refreshedReader;
+  }
+  
+  private abstract static class ReaderThreadTask {
+    protected volatile boolean stopped;
+    public void stop() {
+      this.stopped = true;
+    }
+    
+    public abstract void run() throws Exception;
+  }
+  
+  private static class ReaderThread extends Thread {
+    private ReaderThreadTask task;
+    private Throwable error;
+    
+    
+    ReaderThread(ReaderThreadTask task) {
+      this.task = task;
+    }
+    
+    public void stopThread() {
+      this.task.stop();
+    }
+    
+    @Override
+    public void run() {
+      try {
+        this.task.run();
+      } catch (Throwable r) {
+        r.printStackTrace(System.out);
+        this.error = r;
+      }
+    }
+  }
+  
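+  // refreshReader() must modify the index and then reopen atomically; this
+  // mutex serializes those steps when refreshReader is called from multiple
+  // threads (see testThreadSafety).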
+  private Object createReaderMutex = new Object();
+  
+  private ReaderCouple refreshReader(IndexReader reader, boolean hasChanges) throws IOException {
+    return refreshReader(reader, null, -1, hasChanges);
+  }
+  
+  ReaderCouple refreshReader(IndexReader reader, TestReopen test, int modify, boolean hasChanges) throws IOException {
+    synchronized (createReaderMutex) {
+      IndexReader r = null;
+      if (test != null) {
+        test.modifyIndex(modify);
+        r = test.openReader();
+      }
+      
+      IndexReader refreshed = null;
+      try {
+        refreshed = reader.reopen();
+      } finally {
+        if (refreshed == null && r != null) {
+          // Hit exception -- close opened reader
+          r.close();
+        }
+      }
+      
+      if (hasChanges) {
+        if (refreshed == reader) {
+          fail("No new IndexReader instance created during refresh.");
+        }
+      } else {
+        if (refreshed != reader) {
+          fail("New IndexReader instance created during refresh even though index had no changes.");
+        }
+      }
+      
+      return new ReaderCouple(r, refreshed);
+    }
+  }
+  
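+  // Creates an index with 100 docs; with multiSegment=true it commits every
+  // 10 docs so multiple segments remain, otherwise it optimizes down to one segment.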
+  public static void createIndex(Random random, Directory dir, boolean multiSegment) throws IOException {
+    IndexWriter.unlock(dir);
+    IndexWriter w = new IndexWriter(dir, LuceneTestCase.newIndexWriterConfig(random,
+        TEST_VERSION_CURRENT, new MockAnalyzer(random))
+        .setMergePolicy(new LogDocMergePolicy()));
+    
+    for (int i = 0; i < 100; i++) {
+      w.addDocument(createDocument(i, 4));
+      if (multiSegment && (i % 10) == 0) {
+        w.commit();
+      }
+    }
+    
+    if (!multiSegment) {
+      w.optimize();
+    }
+    
+    w.close();
+
+    IndexReader r = IndexReader.open(dir, false);
+    if (multiSegment) {
+      assertTrue(r.getSequentialSubReaders().length > 1);
+    } else {
+      assertTrue(r.getSequentialSubReaders().length == 1);
+    }
+    r.close();
+  }
+
+  public static Document createDocument(int n, int numFields) {
+    StringBuilder sb = new StringBuilder();
+    Document doc = new Document();
+    sb.append("a");
+    sb.append(n);
+    doc.add(new Field("field1", sb.toString(), Store.YES, Index.ANALYZED));
+    doc.add(new Field("fielda", sb.toString(), Store.YES, Index.NOT_ANALYZED_NO_NORMS));
+    doc.add(new Field("fieldb", sb.toString(), Store.YES, Index.NO));
+    sb.append(" b");
+    sb.append(n);
+    for (int i = 1; i < numFields; i++) {
+      doc.add(new Field("field" + (i+1), sb.toString(), Store.YES, Index.ANALYZED));
+    }
+    return doc;
+  }
+
+  static void modifyIndex(int i, Directory dir) throws IOException {
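+    // Each case applies a different change: 0 deletes docs, 1 and 4 change
+    // norms, 2 optimizes, 3 adds docs and optimizes, 5 adds a single doc.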
+    switch (i) {
+      case 0: {
+        if (VERBOSE) {
+          System.out.println("TEST: modify index");
+        }
+        IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+        w.setInfoStream(VERBOSE ? System.out : null);
+        w.deleteDocuments(new Term("field2", "a11"));
+        w.deleteDocuments(new Term("field2", "b30"));
+        w.close();
+        break;
+      }
+      case 1: {
+        IndexReader reader = IndexReader.open(dir, false);
+        reader.setNorm(4, "field1", 123);
+        reader.setNorm(44, "field2", 222);
+        reader.setNorm(44, "field4", 22);
+        reader.close();
+        break;
+      }
+      case 2: {
+        IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+        w.optimize();
+        w.close();
+        break;
+      }
+      case 3: {
+        IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+        w.addDocument(createDocument(101, 4));
+        w.optimize();
+        w.addDocument(createDocument(102, 4));
+        w.addDocument(createDocument(103, 4));
+        w.close();
+        break;
+      }
+      case 4: {
+        IndexReader reader = IndexReader.open(dir, false);
+        reader.setNorm(5, "field1", 123);
+        reader.setNorm(55, "field2", 222);
+        reader.close();
+        break;
+      }
+      case 5: {
+        IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+        w.addDocument(createDocument(101, 4));
+        w.close();
+        break;
+      }
+    }
+  }  
+  
+  private void assertReaderClosed(IndexReader reader, boolean checkSubReaders, boolean checkNormsClosed) {
+    assertEquals(0, reader.getRefCount());
+    
+    if (checkNormsClosed && reader instanceof SegmentReader) {
+      assertTrue(((SegmentReader) reader).normsClosed());
+    }
+    
+    if (checkSubReaders) {
+      if (reader instanceof DirectoryReader) {
+        IndexReader[] subReaders = reader.getSequentialSubReaders();
+        for (int i = 0; i < subReaders.length; i++) {
+          assertReaderClosed(subReaders[i], checkSubReaders, checkNormsClosed);
+        }
+      }
+      
+      if (reader instanceof MultiReader) {
+        IndexReader[] subReaders = reader.getSequentialSubReaders();
+        for (int i = 0; i < subReaders.length; i++) {
+          assertReaderClosed(subReaders[i], checkSubReaders, checkNormsClosed);
+        }
+      }
+      
+      if (reader instanceof ParallelReader) {
+        IndexReader[] subReaders = ((ParallelReader) reader).getSubReaders();
+        for (int i = 0; i < subReaders.length; i++) {
+          assertReaderClosed(subReaders[i], checkSubReaders, checkNormsClosed);
+        }
+      }
+    }
+  }
+
+  /*
+  private void assertReaderOpen(IndexReader reader) {
+    reader.ensureOpen();
+    
+    if (reader instanceof DirectoryReader) {
+      IndexReader[] subReaders = reader.getSequentialSubReaders();
+      for (int i = 0; i < subReaders.length; i++) {
+        assertReaderOpen(subReaders[i]);
+      }
+    }
+  }
+  */
+
+  private void assertRefCountEquals(int refCount, IndexReader reader) {
+    assertEquals("Reader has wrong refCount value.", refCount, reader.getRefCount());
+  }
+
+
+  private abstract static class TestReopen {
+    protected abstract IndexReader openReader() throws IOException;
+    protected abstract void modifyIndex(int i) throws IOException;
+  }
+  
+  public void testCloseOrig() throws Throwable {
+    Directory dir = newDirectory();
+    createIndex(random, dir, false);
+    IndexReader r1 = IndexReader.open(dir, false);
+    IndexReader r2 = IndexReader.open(dir, false);
+    r2.deleteDocument(0);
+    r2.close();
+
+    IndexReader r3 = r1.reopen();
+    assertTrue(r1 != r3);
+    r1.close();
+    try {
+      r1.document(2);
+      fail("did not hit exception");
+    } catch (AlreadyClosedException ace) {
+      // expected
+    }
+    r3.close();
+    dir.close();
+  }
+
+  public void testDeletes() throws Throwable {
+    Directory dir = newDirectory();
+    createIndex(random, dir, false); // Create an index with a bunch of docs (1 segment)
+
+    modifyIndex(0, dir); // Get delete bitVector on 1st segment
+    modifyIndex(5, dir); // Add a doc (2 segments)
+
+    IndexReader r1 = IndexReader.open(dir, false); // MSR
+
+    modifyIndex(5, dir); // Add another doc (3 segments)
+
+    IndexReader r2 = r1.reopen(); // MSR
+    assertTrue(r1 != r2);
+
+    SegmentReader sr1 = (SegmentReader) r1.getSequentialSubReaders()[0]; // Get SRs for the first segment from original
+    SegmentReader sr2 = (SegmentReader) r2.getSequentialSubReaders()[0]; // and reopened IRs
+
+    // At this point they share the same BitVector
+    assertTrue(sr1.deletedDocs==sr2.deletedDocs);
+
+    r2.deleteDocument(0);
+
+    // r1 should not see the delete
+    assertFalse(r1.isDeleted(0));
+
+    // Now r2 should have made a private copy of deleted docs:
+    assertTrue(sr1.deletedDocs!=sr2.deletedDocs);
+
+    r1.close();
+    r2.close();
+    dir.close();
+  }
+
+  public void testDeletes2() throws Throwable {
+    Directory dir = newDirectory();
+    createIndex(random, dir, false);
+    // Get delete bitVector
+    modifyIndex(0, dir);
+    IndexReader r1 = IndexReader.open(dir, false);
+
+    // Add doc:
+    modifyIndex(5, dir);
+
+    IndexReader r2 = r1.reopen();
+    assertTrue(r1 != r2);
+
+    IndexReader[] rs2 = r2.getSequentialSubReaders();
+
+    SegmentReader sr1 = SegmentReader.getOnlySegmentReader(r1);
+    SegmentReader sr2 = (SegmentReader) rs2[0];
+
+    // At this point they share the same BitVector
+    assertTrue(sr1.deletedDocs==sr2.deletedDocs);
+    final BitVector delDocs = sr1.deletedDocs;
+    r1.close();
+
+    r2.deleteDocument(0);
+    assertTrue(delDocs==sr2.deletedDocs);
+    r2.close();
+    dir.close();
+  }
+
+  private static class KeepAllCommits implements IndexDeletionPolicy {
+    public void onInit(List<? extends IndexCommit> commits) {
+    }
+    public void onCommit(List<? extends IndexCommit> commits) {
+    }
+  }
+
+  public void testReopenOnCommit() throws Throwable {
+    Directory dir = newDirectory();
+    IndexWriter writer = new IndexWriter(
+        dir,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+            setIndexDeletionPolicy(new KeepAllCommits()).
+            setMaxBufferedDocs(-1).
+            setMergePolicy(newLogMergePolicy(10))
+    );
+    for(int i=0;i<4;i++) {
+      Document doc = new Document();
+      doc.add(newField("id", ""+i, Field.Store.NO, Field.Index.NOT_ANALYZED));
+      writer.addDocument(doc);
+      Map<String,String> data = new HashMap<String,String>();
+      data.put("index", i+"");
+      writer.commit(data);
+    }
+    for(int i=0;i<4;i++) {
+      writer.deleteDocuments(new Term("id", ""+i));
+      Map<String,String> data = new HashMap<String,String>();
+      data.put("index", (4+i)+"");
+      writer.commit(data);
+    }
+    writer.close();
+
+    IndexReader r = IndexReader.open(dir, false);
+    assertEquals(0, r.numDocs());
+
+    Collection<IndexCommit> commits = IndexReader.listCommits(dir);
+    for (final IndexCommit commit : commits) {
+      IndexReader r2 = r.reopen(commit);
+      assertTrue(r2 != r);
+
+      // Reader should be readOnly
+      try {
+        r2.deleteDocument(0);
+        fail("no exception hit");
+      } catch (UnsupportedOperationException uoe) {
+        // expected
+      }
+
+      final Map<String,String> s = commit.getUserData();
+      final int v;
+      if (s.size() == 0) {
+        // First commit created by IW
+        v = -1;
+      } else {
+        v = Integer.parseInt(s.get("index"));
+      }
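+      // Commits 0-3 each added one doc (v+1 docs remain); commits 4-7 each
+      // deleted one doc (7-v docs remain).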
+      if (v < 4) {
+        assertEquals(1+v, r2.numDocs());
+      } else {
+        assertEquals(7-v, r2.numDocs());
+      }
+      r.close();
+      r = r2;
+    }
+    r.close();
+    dir.close();
+  }
+  
+  // LUCENE-1579: Make sure all SegmentReaders are new when
+  // reopen switches readOnly
+  public void testReopenChangeReadonly() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter writer = new IndexWriter(
+        dir,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+            setMaxBufferedDocs(-1).
+            setMergePolicy(newLogMergePolicy(10))
+    );
+    Document doc = new Document();
+    doc.add(newField("number", "17", Field.Store.NO, Field.Index.NOT_ANALYZED));
+    writer.addDocument(doc);
+    writer.commit();
+
+    // Open reader1
+    IndexReader r = IndexReader.open(dir, false);
+    assertTrue(r instanceof DirectoryReader);
+    IndexReader r1 = SegmentReader.getOnlySegmentReader(r);
+    final int[] ints = FieldCache.DEFAULT.getInts(r1, "number");
+    assertEquals(1, ints.length);
+    assertEquals(17, ints[0]);
+
+    // Reopen to readonly w/ no changes
+    IndexReader r3 = r.reopen(true);
+    assertTrue(r3 instanceof ReadOnlyDirectoryReader);
+    r3.close();
+
+    // Add new segment
+    writer.addDocument(doc);
+    writer.commit();
+
+    // Reopen reader1 --> reader2
+    IndexReader r2 = r.reopen(true);
+    r.close();
+    assertTrue(r2 instanceof ReadOnlyDirectoryReader);
+    IndexReader[] subs = r2.getSequentialSubReaders();
+    final int[] ints2 = FieldCache.DEFAULT.getInts(subs[0], "number");
+    r2.close();
+
+    assertTrue(subs[0] instanceof ReadOnlySegmentReader);
+    assertTrue(subs[1] instanceof ReadOnlySegmentReader);
+    assertTrue(ints == ints2);
+
+    writer.close();
+    dir.close();
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestIndexWriter.java b/lucene/backwards/src/test/org/apache/lucene/index/TestIndexWriter.java
new file mode 100644
index 0000000..8bb051e
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestIndexWriter.java
@@ -0,0 +1,1940 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.io.Reader;
+import java.io.StringReader;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+import java.util.Collections;
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.WhitespaceAnalyzer;
+import org.apache.lucene.analysis.WhitespaceTokenizer;
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.Fieldable;
+import org.apache.lucene.document.Field.Index;
+import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.document.Field.TermVector;
+import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.PhraseQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.spans.SpanTermQuery;
+import org.apache.lucene.store.AlreadyClosedException;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.IndexOutput;
+import org.apache.lucene.store.Lock;
+import org.apache.lucene.store.LockFactory;
+import org.apache.lucene.store.NoLockFactory;
+import org.apache.lucene.store.MockDirectoryWrapper;
+import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.store.SingleInstanceLockFactory;
+import org.apache.lucene.util._TestUtil;
+import org.apache.lucene.util.ThreadInterruptedException;
+
+public class TestIndexWriter extends LuceneTestCase {
+
+    public void testDocCount() throws IOException {
+        Directory dir = newDirectory();
+
+        IndexWriter writer = null;
+        IndexReader reader = null;
+        int i;
+
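+        // Temporarily change the default write-lock timeout to verify the
+        // static setter/getter, then restore the original value.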
+        long savedWriteLockTimeout = IndexWriterConfig.getDefaultWriteLockTimeout();
+        try {
+          IndexWriterConfig.setDefaultWriteLockTimeout(2000);
+          assertEquals(2000, IndexWriterConfig.getDefaultWriteLockTimeout());
+          writer  = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+        } finally {
+          IndexWriterConfig.setDefaultWriteLockTimeout(savedWriteLockTimeout);
+        }
+
+        // add 100 documents
+        for (i = 0; i < 100; i++) {
+            addDoc(writer);
+        }
+        assertEquals(100, writer.maxDoc());
+        writer.close();
+
+        // delete 40 documents
+        reader = IndexReader.open(dir, false);
+        for (i = 0; i < 40; i++) {
+            reader.deleteDocument(i);
+        }
+        reader.close();
+
+        reader = IndexReader.open(dir, true);
+        assertEquals(60, reader.numDocs());
+        reader.close();
+
+        // optimize the index and check that the new doc count is correct
+        writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+        assertEquals(60, writer.numDocs());
+        writer.optimize();
+        assertEquals(60, writer.maxDoc());
+        assertEquals(60, writer.numDocs());
+        writer.close();
+
+        // check that the index reader gives the same numbers.
+        reader = IndexReader.open(dir, true);
+        assertEquals(60, reader.maxDoc());
+        assertEquals(60, reader.numDocs());
+        reader.close();
+
+        // make sure opening a new index for create over
+        // this existing one works correctly:
+        writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE));
+        assertEquals(0, writer.maxDoc());
+        assertEquals(0, writer.numDocs());
+        writer.close();
+        dir.close();
+    }
+
+    static void addDoc(IndexWriter writer) throws IOException
+    {
+        Document doc = new Document();
+        doc.add(newField("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
+        writer.addDocument(doc);
+    }
+
+    static void addDocWithIndex(IndexWriter writer, int index) throws IOException
+    {
+        Document doc = new Document();
+        doc.add(newField("content", "aaa " + index, Field.Store.YES, Field.Index.ANALYZED));
+        doc.add(newField("id", "" + index, Field.Store.YES, Field.Index.ANALYZED));
+        writer.addDocument(doc);
+    }                            
+
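+    // Opening an IndexWriter (and rolling it back) triggers deletion of any
+    // files not referenced by the index; if the directory listing changes,
+    // unreferenced files were present.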
+    public static void assertNoUnreferencedFiles(Directory dir, String message) throws IOException {
+      String[] startFiles = dir.listAll();
+      SegmentInfos infos = new SegmentInfos();
+      infos.read(dir);
+      new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))).rollback();
+      String[] endFiles = dir.listAll();
+
+      Arrays.sort(startFiles);
+      Arrays.sort(endFiles);
+
+      if (!Arrays.equals(startFiles, endFiles)) {
+        fail(message + ": before delete:\n    " + arrayToString(startFiles) + "\n  after delete:\n    " + arrayToString(endFiles));
+      }
+    }
+
+    /**
+     * Make sure we skip wicked long terms.
+    */
+    public void testWickedLongTerm() throws IOException {
+      MockDirectoryWrapper dir = newDirectory();
+      IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
+        TEST_VERSION_CURRENT, new StandardAnalyzer(TEST_VERSION_CURRENT)));
+
+      char[] chars = new char[DocumentsWriter.CHAR_BLOCK_SIZE-1];
+      Arrays.fill(chars, 'x');
+      Document doc = new Document();
+      final String bigTerm = new String(chars);
+
+      // Max length term is 16383, so these contents produce
+      // a too-long term:
+      String contents = "abc xyz x" + bigTerm + " another term";
+      doc.add(new Field("content", contents, Field.Store.NO, Field.Index.ANALYZED));
+      writer.addDocument(doc);
+
+      // Make sure we can add another normal document
+      doc = new Document();
+      doc.add(new Field("content", "abc bbb ccc", Field.Store.NO, Field.Index.ANALYZED));
+      writer.addDocument(doc);
+      writer.close();
+
+      IndexReader reader = IndexReader.open(dir, true);
+
+      // Make sure all terms < max size were indexed
+      assertEquals(2, reader.docFreq(new Term("content", "abc")));
+      assertEquals(1, reader.docFreq(new Term("content", "bbb")));
+      assertEquals(1, reader.docFreq(new Term("content", "term")));
+      assertEquals(1, reader.docFreq(new Term("content", "another")));
+
+      // Make sure position is still incremented when
+      // massive term is skipped:
+      TermPositions tps = reader.termPositions(new Term("content", "another"));
+      assertTrue(tps.next());
+      assertEquals(1, tps.freq());
+      assertEquals(3, tps.nextPosition());
+
+      // Make sure the doc that has the massive term is in
+      // the index:
+      assertEquals("document with wicked long term should is not in the index!", 2, reader.numDocs());
+
+      reader.close();
+
+      // Make sure we can add a document with exactly the
+      // maximum length term, and search on that term:
+      doc = new Document();
+      doc.add(new Field("content", bigTerm, Field.Store.NO, Field.Index.ANALYZED));
+      StandardAnalyzer sa = new StandardAnalyzer(TEST_VERSION_CURRENT);
+      sa.setMaxTokenLength(100000);
+      writer  = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, sa));
+      writer.addDocument(doc);
+      writer.close();
+      reader = IndexReader.open(dir, true);
+      assertEquals(1, reader.docFreq(new Term("content", bigTerm)));
+      reader.close();
+
+      dir.close();
+    }
+
+    static String arrayToString(String[] l) {
+      String s = "";
+      for(int i=0;i<l.length;i++) {
+        if (i > 0) {
+          s += "\n    ";
+        }
+        s += l[i];
+      }
+      return s;
+    }
+
+    // Make sure we can open an index for create even when a
+    // reader holds it open (this fails pre lock-less
+    // commits on windows):
+    public void testCreateWithReader() throws IOException {
+      Directory dir = newDirectory();
+      
+      // add one document & close writer
+      IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+      addDoc(writer);
+      writer.close();
+      
+      // now open reader:
+      IndexReader reader = IndexReader.open(dir, true);
+      assertEquals("should be one document", reader.numDocs(), 1);
+      
+      // now open index for create:
+      writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE));
+      assertEquals("should be zero documents", writer.maxDoc(), 0);
+      addDoc(writer);
+      writer.close();
+      
+      assertEquals("should be one document", reader.numDocs(), 1);
+      IndexReader reader2 = IndexReader.open(dir, true);
+      assertEquals("should be one document", reader2.numDocs(), 1);
+      reader.close();
+      reader2.close();
+      
+      dir.close();
+    }
+
+    public void testChangesAfterClose() throws IOException {
+        Directory dir = newDirectory();
+
+        IndexWriter writer = null;
+
+        writer  = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+        addDoc(writer);
+
+        // close
+        writer.close();
+        try {
+          addDoc(writer);
+          fail("did not hit AlreadyClosedException");
+        } catch (AlreadyClosedException e) {
+          // expected
+        }
+        dir.close();
+    }
+
+    public void testIndexNoDocuments() throws IOException {
+      MockDirectoryWrapper dir = newDirectory();      
+      IndexWriter writer  = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+      writer.commit();
+      writer.close();
+
+      IndexReader reader = IndexReader.open(dir, true);
+      assertEquals(0, reader.maxDoc());
+      assertEquals(0, reader.numDocs());
+      reader.close();
+
+      writer  = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
+      writer.commit();
+      writer.close();
+
+      reader = IndexReader.open(dir, true);
+      assertEquals(0, reader.maxDoc());
+      assertEquals(0, reader.numDocs());
+      reader.close();
+      dir.close();
+    }
+
+    public void testManyFields() throws IOException {
+      MockDirectoryWrapper dir = newDirectory();      
+      IndexWriter writer  = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(10));
+      for(int j=0;j<100;j++) {
+        Document doc = new Document();
+        doc.add(newField("a"+j, "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
+        doc.add(newField("b"+j, "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
+        doc.add(newField("c"+j, "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
+        doc.add(newField("d"+j, "aaa", Field.Store.YES, Field.Index.ANALYZED));
+        doc.add(newField("e"+j, "aaa", Field.Store.YES, Field.Index.ANALYZED));
+        doc.add(newField("f"+j, "aaa", Field.Store.YES, Field.Index.ANALYZED));
+        writer.addDocument(doc);
+      }
+      writer.close();
+
+      IndexReader reader = IndexReader.open(dir, true);
+      assertEquals(100, reader.maxDoc());
+      assertEquals(100, reader.numDocs());
+      for(int j=0;j<100;j++) {
+        assertEquals(1, reader.docFreq(new Term("a"+j, "aaa"+j)));
+        assertEquals(1, reader.docFreq(new Term("b"+j, "aaa"+j)));
+        assertEquals(1, reader.docFreq(new Term("c"+j, "aaa"+j)));
+        assertEquals(1, reader.docFreq(new Term("d"+j, "aaa")));
+        assertEquals(1, reader.docFreq(new Term("e"+j, "aaa")));
+        assertEquals(1, reader.docFreq(new Term("f"+j, "aaa")));
+      }
+      reader.close();
+      dir.close();
+    }
+
+    public void testSmallRAMBuffer() throws IOException {
+      MockDirectoryWrapper dir = newDirectory();      
+      IndexWriter writer  = new IndexWriter(
+          dir,
+          newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+              setRAMBufferSizeMB(0.000001).
+              setMergePolicy(newLogMergePolicy(10))
+      );
+      int lastNumFile = dir.listAll().length;
+      for(int j=0;j<9;j++) {
+        Document doc = new Document();
+        doc.add(newField("field", "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
+        writer.addDocument(doc);
+        int numFile = dir.listAll().length;
+        // Verify that with a tiny RAM buffer we see a new
+        // segment after every doc
+        assertTrue(numFile > lastNumFile);
+        lastNumFile = numFile;
+      }
+      writer.close();
+      dir.close();
+    }
+
+    /**
+     * Make sure it's OK to change RAM buffer size and maxBufferedDocs in a
+     * write session.
+     * 
+     * @deprecated after all the setters on IW go away (4.0), this test can be
+     *             removed because changing ram buffer settings during a write
+     *             session won't be possible.
+     */
+    @Deprecated
+    public void testChangingRAMBuffer() throws IOException {
+      MockDirectoryWrapper dir = newDirectory();      
+      IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
+        TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(10).setRAMBufferSizeMB(
+        IndexWriterConfig.DISABLE_AUTO_FLUSH));
+
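+      // Toggles maxBufferedDocs and the RAM buffer size mid-session and checks
+      // that the flush count only increases when the active trigger should fire.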
+      int lastFlushCount = -1;
+      for(int j=1;j<52;j++) {
+        Document doc = new Document();
+        doc.add(newField("field", "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
+        writer.addDocument(doc);
+        _TestUtil.syncConcurrentMerges(writer);
+        int flushCount = writer.getFlushCount();
+        if (j == 1)
+          lastFlushCount = flushCount;
+        else if (j < 10)
+          // No new flushes should have occurred
+          assertEquals(flushCount, lastFlushCount);
+        else if (10 == j) {
+          assertTrue(flushCount > lastFlushCount);
+          lastFlushCount = flushCount;
+          writer.setRAMBufferSizeMB(0.000001);
+          writer.setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH);
+        } else if (j < 20) {
+          assertTrue(flushCount > lastFlushCount);
+          lastFlushCount = flushCount;
+        } else if (20 == j) {
+          writer.setRAMBufferSizeMB(16);
+          writer.setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH);
+          lastFlushCount = flushCount;
+        } else if (j < 30) {
+          assertEquals(flushCount, lastFlushCount);
+        } else if (30 == j) {
+          writer.setRAMBufferSizeMB(0.000001);
+          writer.setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH);
+        } else if (j < 40) {
+          assertTrue(flushCount> lastFlushCount);
+          lastFlushCount = flushCount;
+        } else if (40 == j) {
+          writer.setMaxBufferedDocs(10);
+          writer.setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
+          lastFlushCount = flushCount;
+        } else if (j < 50) {
+          assertEquals(flushCount, lastFlushCount);
+          writer.setMaxBufferedDocs(10);
+          writer.setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
+        } else if (50 == j) {
+          assertTrue(flushCount > lastFlushCount);
+        }
+      }
+      writer.close();
+      dir.close();
+    }
+
+    /**
+     * @deprecated after setters on IW go away, this test can be deleted because
+     *             changing those settings on IW won't be possible.
+     */
+    @Deprecated
+    public void testChangingRAMBuffer2() throws IOException {
+      MockDirectoryWrapper dir = newDirectory();      
+      IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
+        TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).setMaxBufferedDocs(10).setMaxBufferedDeleteTerms(
+        10).setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH));
+
+      for(int j=1;j<52;j++) {
+        Document doc = new Document();
+        doc.add(newField("field", "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
+        writer.addDocument(doc);
+      }
+      
+      int lastFlushCount = -1;
+      for(int j=1;j<52;j++) {
+        writer.deleteDocuments(new Term("field", "aaa" + j));
+        _TestUtil.syncConcurrentMerges(writer);
+        int flushCount = writer.getFlushCount();
+        if (j == 1)
+          lastFlushCount = flushCount;
+        else if (j < 10) {
+          // No new flushes should have occurred
+          assertEquals(flushCount, lastFlushCount);
+        } else if (10 == j) {
+          assertTrue(flushCount > lastFlushCount);
+          lastFlushCount = flushCount;
+          writer.setRAMBufferSizeMB(0.000001);
+          writer.setMaxBufferedDeleteTerms(1);
+        } else if (j < 20) {
+          assertTrue(flushCount > lastFlushCount);
+          lastFlushCount = flushCount;
+        } else if (20 == j) {
+          writer.setRAMBufferSizeMB(16);
+          writer.setMaxBufferedDeleteTerms(IndexWriterConfig.DISABLE_AUTO_FLUSH);
+          lastFlushCount = flushCount;
+        } else if (j < 30) {
+          assertEquals(flushCount, lastFlushCount);
+        } else if (30 == j) {
+          writer.setRAMBufferSizeMB(0.000001);
+          writer.setMaxBufferedDeleteTerms(IndexWriterConfig.DISABLE_AUTO_FLUSH);
+          writer.setMaxBufferedDeleteTerms(1);
+        } else if (j < 40) {
+          assertTrue(flushCount > lastFlushCount);
+          lastFlushCount = flushCount;
+        } else if (40 == j) {
+          writer.setMaxBufferedDeleteTerms(10);
+          writer.setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
+          lastFlushCount = flushCount;
+        } else if (j < 50) {
+          assertEquals(flushCount, lastFlushCount);
+          writer.setMaxBufferedDeleteTerms(10);
+          writer.setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
+        } else if (50 == j) {
+          assertTrue(flushCount > lastFlushCount);
+        }
+      }
+      writer.close();
+      dir.close();
+    }
+
+    // Make sure it's OK to change RAM buffer size and
+    // maxBufferedDocs in a write session, using IW.getConfig()
+    public void testChangingRAMBufferWithIWC() throws IOException {
+      Directory dir = newDirectory();      
+      IndexWriter writer  = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+      writer.getConfig().setMaxBufferedDocs(10);
+      writer.getConfig().setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
+
+      int lastFlushCount = -1;
+      for(int j=1;j<52;j++) {
+        Document doc = new Document();
+        doc.add(new Field("field", "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
+        writer.addDocument(doc);
+        _TestUtil.syncConcurrentMerges(writer);
+        int flushCount = writer.getFlushCount();
+        if (j == 1)
+          lastFlushCount = flushCount;
+        else if (j < 10)
+          // No new files should be created
+          assertEquals(flushCount, lastFlushCount);
+        else if (10 == j) {
+          assertTrue(flushCount > lastFlushCount);
+          lastFlushCount = flushCount;
+          writer.getConfig().setRAMBufferSizeMB(0.000001);
+          writer.getConfig().setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH);
+        } else if (j < 20) {
+          assertTrue(flushCount > lastFlushCount);
+          lastFlushCount = flushCount;
+        } else if (20 == j) {
+          writer.getConfig().setRAMBufferSizeMB(16);
+          writer.getConfig().setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH);
+          lastFlushCount = flushCount;
+        } else if (j < 30) {
+          assertEquals(flushCount, lastFlushCount);
+        } else if (30 == j) {
+          writer.getConfig().setRAMBufferSizeMB(0.000001);
+          writer.getConfig().setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH);
+        } else if (j < 40) {
+          assertTrue(flushCount > lastFlushCount);
+          lastFlushCount = flushCount;
+        } else if (40 == j) {
+          writer.getConfig().setMaxBufferedDocs(10);
+          writer.getConfig().setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
+          lastFlushCount = flushCount;
+        } else if (j < 50) {
+          assertEquals(flushCount, lastFlushCount);
+          writer.getConfig().setMaxBufferedDocs(10);
+          writer.getConfig().setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
+        } else if (50 == j) {
+          assertTrue(flushCount > lastFlushCount);
+        }
+      }
+      writer.close();
+      dir.close();
+    }
+
+    public void testChangingRAMBuffer2WithIWC() throws IOException {
+      Directory dir = newDirectory();      
+      IndexWriter writer  = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+      writer.getConfig().setMaxBufferedDocs(10);
+      writer.getConfig().setMaxBufferedDeleteTerms(10);
+      writer.getConfig().setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
+
+      for(int j=1;j<52;j++) {
+        Document doc = new Document();
+        doc.add(new Field("field", "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
+        writer.addDocument(doc);
+      }
+      
+      int lastFlushCount = -1;
+      for(int j=1;j<52;j++) {
+        writer.deleteDocuments(new Term("field", "aaa" + j));
+        _TestUtil.syncConcurrentMerges(writer);
+        int flushCount = writer.getFlushCount();
+        if (j == 1)
+          lastFlushCount = flushCount;
+        else if (j < 10) {
+          // No new files should be created
+          assertEquals(flushCount, lastFlushCount);
+        } else if (10 == j) {
+          assertTrue(flushCount > lastFlushCount);
+          lastFlushCount = flushCount;
+          writer.getConfig().setRAMBufferSizeMB(0.000001);
+          writer.getConfig().setMaxBufferedDeleteTerms(1);
+        } else if (j < 20) {
+          assertTrue(flushCount > lastFlushCount);
+          lastFlushCount = flushCount;
+        } else if (20 == j) {
+          writer.getConfig().setRAMBufferSizeMB(16);
+          writer.getConfig().setMaxBufferedDeleteTerms(IndexWriterConfig.DISABLE_AUTO_FLUSH);
+          lastFlushCount = flushCount;
+        } else if (j < 30) {
+          assertEquals(flushCount, lastFlushCount);
+        } else if (30 == j) {
+          writer.getConfig().setRAMBufferSizeMB(0.000001);
+          writer.getConfig().setMaxBufferedDeleteTerms(IndexWriterConfig.DISABLE_AUTO_FLUSH);
+          writer.getConfig().setMaxBufferedDeleteTerms(1);
+        } else if (j < 40) {
+          assertTrue(flushCount > lastFlushCount);
+          lastFlushCount = flushCount;
+        } else if (40 == j) {
+          writer.getConfig().setMaxBufferedDeleteTerms(10);
+          writer.getConfig().setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
+          lastFlushCount = flushCount;
+        } else if (j < 50) {
+          assertEquals(flushCount, lastFlushCount);
+          writer.getConfig().setMaxBufferedDeleteTerms(10);
+          writer.getConfig().setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
+        } else if (50 == j) {
+          assertTrue(flushCount > lastFlushCount);
+        }
+      }
+      writer.close();
+      dir.close();
+    }
+
+    public void testDiverseDocs() throws IOException {
+      MockDirectoryWrapper dir = newDirectory();      
+      IndexWriter writer  = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setRAMBufferSizeMB(0.5));
+      for(int i=0;i<3;i++) {
+        // First, docs where every term is unique (heavy on
+        // Posting instances)
+        for(int j=0;j<100;j++) {
+          Document doc = new Document();
+          for(int k=0;k<100;k++) {
+            doc.add(newField("field", Integer.toString(random.nextInt()), Field.Store.YES, Field.Index.ANALYZED));
+          }
+          writer.addDocument(doc);
+        }
+
+        // Next, many single term docs where only one term
+        // occurs (heavy on byte blocks)
+        for(int j=0;j<100;j++) {
+          Document doc = new Document();
+          doc.add(newField("field", "aaa aaa aaa aaa aaa aaa aaa aaa aaa aaa", Field.Store.YES, Field.Index.ANALYZED));
+          writer.addDocument(doc);
+        }
+
+        // Next, many single term docs where only one term
+        // occurs but the terms are very long (heavy on
+        // char[] arrays)
+        for(int j=0;j<100;j++) {
+          StringBuilder b = new StringBuilder();
+          String x = Integer.toString(j) + ".";
+          for(int k=0;k<1000;k++)
+            b.append(x);
+          String longTerm = b.toString();
+
+          Document doc = new Document();
+          doc.add(newField("field", longTerm, Field.Store.YES, Field.Index.ANALYZED));
+          writer.addDocument(doc);
+        }
+      }
+      writer.close();
+
+      IndexSearcher searcher = new IndexSearcher(dir, false);
+      ScoreDoc[] hits = searcher.search(new TermQuery(new Term("field", "aaa")), null, 1000).scoreDocs;
+      assertEquals(300, hits.length);
+      searcher.close();
+
+      dir.close();
+    }
+
+    public void testEnablingNorms() throws IOException {
+      MockDirectoryWrapper dir = newDirectory();      
+      IndexWriter writer  = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(10));
+      // Enable norms for only 1 doc, pre flush
+      for(int j=0;j<10;j++) {
+        Document doc = new Document();
+        Field f = newField("field", "aaa", Field.Store.YES, Field.Index.ANALYZED); 
+        if (j != 8) {
+          f.setOmitNorms(true);
+        }
+        doc.add(f);
+        writer.addDocument(doc);
+      }
+      writer.close();
+
+      Term searchTerm = new Term("field", "aaa");
+
+      IndexSearcher searcher = new IndexSearcher(dir, false);
+      ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
+      assertEquals(10, hits.length);
+      searcher.close();
+
+      writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))
+        .setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(10));
+      // Enable norms for only 1 doc, post flush
+      for(int j=0;j<27;j++) {
+        Document doc = new Document();
+        Field f = newField("field", "aaa", Field.Store.YES, Field.Index.ANALYZED); 
+        if (j != 26) {
+          f.setOmitNorms(true);
+        }
+        doc.add(f);
+        writer.addDocument(doc);
+      }
+      writer.close();
+      searcher = new IndexSearcher(dir, false);
+      hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
+      assertEquals(27, hits.length);
+      searcher.close();
+
+      IndexReader reader = IndexReader.open(dir, true);
+      reader.close();
+
+      dir.close();
+    }
+
+    public void testHighFreqTerm() throws IOException {
+      MockDirectoryWrapper dir = newDirectory();      
+      IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
+          TEST_VERSION_CURRENT, new MockAnalyzer(random)).setRAMBufferSizeMB(0.01));
+      // Massive doc that has 128 K a's
+      StringBuilder b = new StringBuilder(1024*1024);
+      for(int i=0;i<4096;i++) {
+        b.append(" a a a a a a a a");
+        b.append(" a a a a a a a a");
+        b.append(" a a a a a a a a");
+        b.append(" a a a a a a a a");
+      }
+      Document doc = new Document();
+      doc.add(newField("field", b.toString(), Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+      writer.addDocument(doc);
+      writer.close();
+
+      IndexReader reader = IndexReader.open(dir, true);
+      assertEquals(1, reader.maxDoc());
+      assertEquals(1, reader.numDocs());
+      Term t = new Term("field", "a");
+      assertEquals(1, reader.docFreq(t));
+      TermDocs td = reader.termDocs(t);
+      td.next();
+      assertEquals(128*1024, td.freq());
+      reader.close();
+      dir.close();
+    }
+
+    // Make sure that a Directory implementation that does
+    // not use LockFactory at all (ie overrides makeLock and
+    // implements its own private locking) works OK.  This
+    // was raised on java-dev as loss of backwards
+    // compatibility.
+    public void testNullLockFactory() throws IOException {
+
+      final class MyRAMDirectory extends MockDirectoryWrapper {
+        private LockFactory myLockFactory;
+        MyRAMDirectory(Directory delegate) {
+          super(random, delegate);
+          lockFactory = null;
+          myLockFactory = new SingleInstanceLockFactory();
+        }
+        @Override
+        public Lock makeLock(String name) {
+          return myLockFactory.makeLock(name);
+        }
+      }
+      
+      Directory dir = new MyRAMDirectory(new RAMDirectory());
+      IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
+        TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+      for (int i = 0; i < 100; i++) {
+        addDoc(writer);
+      }
+      writer.close();
+      Term searchTerm = new Term("content", "aaa");        
+      IndexSearcher searcher = new IndexSearcher(dir, false);
+      ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
+      assertEquals("did not get right number of hits", 100, hits.length);
+      searcher.close();
+
+      writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))
+        .setOpenMode(OpenMode.CREATE));
+      writer.close();
+      searcher.close();
+      dir.close();
+    }
+
+    public void testFlushWithNoMerging() throws IOException {
+      Directory dir = newDirectory();
+      IndexWriter writer = new IndexWriter(
+          dir,
+          newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+              setMaxBufferedDocs(2).
+              setMergePolicy(newLogMergePolicy(10))
+      );
+      Document doc = new Document();
+      doc.add(newField("field", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+      for(int i=0;i<19;i++)
+        writer.addDocument(doc);
+      writer.flush(false, true);
+      writer.close();
+      SegmentInfos sis = new SegmentInfos();
+      sis.read(dir);
+      // Since we flushed w/o allowing merging we should now
+      // have 10 segments
+      assertEquals(10, sis.size());
+      dir.close();
+    }
+
+    // Make sure we can flush segment w/ norms, then add
+    // empty doc (no norms) and flush
+    public void testEmptyDocAfterFlushingRealDoc() throws IOException {
+      Directory dir = newDirectory();
+      IndexWriter writer  = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+      writer.setInfoStream(VERBOSE ? System.out : null);
+      Document doc = new Document();
+      doc.add(newField("field", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+      writer.addDocument(doc);
+      writer.commit();
+      if (VERBOSE) {
+        System.out.println("\nTEST: now add empty doc");
+      }
+      writer.addDocument(new Document());
+      writer.close();
+      IndexReader reader = IndexReader.open(dir, true);
+      assertEquals(2, reader.numDocs());
+      reader.close();
+      dir.close();
+    }
+
+  /**
+   * Test that no NullPointerException will be raised,
+   * when adding one document with a single, empty field
+   * and term vectors enabled.
+   * @throws IOException
+   *
+   */
+  public void testBadSegment() throws IOException {
+    Directory dir = newDirectory();
+    IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(
+        TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+   
+    Document document = new Document();
+    document.add(newField("tvtest", "", Store.NO, Index.ANALYZED, TermVector.YES));
+    iw.addDocument(document);
+    iw.close();
+    dir.close();
+  }
+
+  // LUCENE-1036
+  public void testMaxThreadPriority() throws IOException {
+    int pri = Thread.currentThread().getPriority();
+    try {
+      Directory dir = newDirectory();
+      IndexWriterConfig conf = newIndexWriterConfig(
+          TEST_VERSION_CURRENT, new MockAnalyzer(random))
+        .setMaxBufferedDocs(2).setMergePolicy(newLogMergePolicy());
+      ((LogMergePolicy) conf.getMergePolicy()).setMergeFactor(2);
+      IndexWriter iw = new IndexWriter(dir, conf);
+      Document document = new Document();
+      document.add(newField("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
+                             Field.TermVector.YES));
+      Thread.currentThread().setPriority(Thread.MAX_PRIORITY);
+      for(int i=0;i<4;i++)
+        iw.addDocument(document);
+      iw.close();
+      dir.close();
+    } finally {
+      Thread.currentThread().setPriority(pri);
+    }
+  }
+
+  // Just intercepts all merges & verifies that we are never
+  // merging a segment with >= 20 (maxMergeDocs) docs
+  private class MyMergeScheduler extends MergeScheduler {
+    @Override
+    synchronized public void merge(IndexWriter writer)
+      throws CorruptIndexException, IOException {
+
+      while(true) {
+        MergePolicy.OneMerge merge = writer.getNextMerge();
+        if (merge == null) {
+          break;
+        }
+        for(int i=0;i<merge.segments.size();i++) {
+          assert merge.segments.get(i).docCount < 20;
+        }
+        writer.merge(merge);
+      }
+    }
+
+    @Override
+    public void close() {}
+  }
+
+  public void testVariableSchema() throws Exception {
+    Directory dir = newDirectory();
+    int delID = 0;
+    for(int i=0;i<20;i++) {
+      if (VERBOSE) {
+        System.out.println("TEST: iter=" + i);
+      }
+      IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(2).setMergePolicy(newLogMergePolicy()));
+      writer.setInfoStream(VERBOSE ? System.out : null);
+      //LogMergePolicy lmp = (LogMergePolicy) writer.getConfig().getMergePolicy();
+      //lmp.setMergeFactor(2);
+      //lmp.setUseCompoundFile(false);
+      Document doc = new Document();
+      String contents = "aa bb cc dd ee ff gg hh ii jj kk";
+
+      if (i == 7) {
+        // Add empty docs here
+        doc.add(newField("content3", "", Field.Store.NO,
+                          Field.Index.ANALYZED));
+      } else {
+        Field.Store storeVal;
+        if (i%2 == 0) {
+          doc.add(newField("content4", contents, Field.Store.YES,
+                            Field.Index.ANALYZED));
+          storeVal = Field.Store.YES;
+        } else
+          storeVal = Field.Store.NO;
+        doc.add(newField("content1", contents, storeVal,
+                          Field.Index.ANALYZED));
+        doc.add(newField("content3", "", Field.Store.YES,
+                          Field.Index.ANALYZED));
+        doc.add(newField("content5", "", storeVal,
+                          Field.Index.ANALYZED));
+      }
+
+      for(int j=0;j<4;j++)
+        writer.addDocument(doc);
+
+      writer.close();
+      IndexReader reader = IndexReader.open(dir, false);
+      reader.deleteDocument(delID++);
+      reader.close();
+
+      if (0 == i % 4) {
+        writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+        //LogMergePolicy lmp2 = (LogMergePolicy) writer.getConfig().getMergePolicy();
+        //lmp2.setUseCompoundFile(false);
+        writer.optimize();
+        writer.close();
+      }
+    }
+    dir.close();
+  }
+
+  public void testNoWaitClose() throws Throwable {
+    Directory directory = newDirectory();
+
+    final Document doc = new Document();
+    Field idField = newField("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
+    doc.add(idField);
+
+    for(int pass=0;pass<2;pass++) {
+      if (VERBOSE) {
+        System.out.println("TEST: pass=" + pass);
+      }
+
+      IndexWriterConfig conf = newIndexWriterConfig(
+          TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE)
+          .setMaxBufferedDocs(2).setMergePolicy(newLogMergePolicy());
+      if (pass == 2) {
+        conf.setMergeScheduler(new SerialMergeScheduler());
+      }
+      IndexWriter writer = new IndexWriter(directory, conf);
+      ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(100);
+      writer.setInfoStream(VERBOSE ? System.out : null);
+
+      // have to use compound file to prevent running out of
+      // file descriptors when newDirectory returns a file-system
+      // backed directory:
+      ((LogMergePolicy) writer.getConfig().getMergePolicy()).setUseCompoundFile(true);
+      
+      for(int iter=0;iter<10;iter++) {
+        if (VERBOSE) {
+          System.out.println("TEST: iter=" + iter);
+        }
+        for(int j=0;j<199;j++) {
+          idField.setValue(Integer.toString(iter*201+j));
+          writer.addDocument(doc);
+        }
+
+        int delID = iter*199;
+        for(int j=0;j<20;j++) {
+          writer.deleteDocuments(new Term("id", Integer.toString(delID)));
+          delID += 5;
+        }
+
+        // Force a bunch of merge threads to kick off so we
+        // stress out aborting them on close:
+        ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(2);
+
+        final IndexWriter finalWriter = writer;
+        final ArrayList<Throwable> failure = new ArrayList<Throwable>();
+        Thread t1 = new Thread() {
+            @Override
+            public void run() {
+              boolean done = false;
+              while(!done) {
+                for(int i=0;i<100;i++) {
+                  try {
+                    finalWriter.addDocument(doc);
+                  } catch (AlreadyClosedException e) {
+                    done = true;
+                    break;
+                  } catch (NullPointerException e) {
+                    done = true;
+                    break;
+                  } catch (Throwable e) {
+                    e.printStackTrace(System.out);
+                    failure.add(e);
+                    done = true;
+                    break;
+                  }
+                }
+                Thread.yield();
+              }
+
+            }
+          };
+
+        t1.start();
+
+        writer.close(false);
+        t1.join();
+
+        // Only check the indexing thread's recorded failures once it has finished:
+        if (failure.size() > 0) {
+          throw failure.get(0);
+        }
+
+        // Make sure reader can read
+        IndexReader reader = IndexReader.open(directory, true);
+        reader.close();
+
+        // Reopen
+        writer = new IndexWriter(directory, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND).setMergePolicy(newLogMergePolicy()));
+        writer.setInfoStream(VERBOSE ? System.out : null);
+      }
+      writer.close();
+    }
+
+    directory.close();
+  }
+
+  // LUCENE-1084: test unlimited field length
+  public void testUnlimitedMaxFieldLength() throws IOException {
+    Directory dir = newDirectory();
+
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+
+    Document doc = new Document();
+    StringBuilder b = new StringBuilder();
+    for(int i=0;i<10000;i++)
+      b.append(" a");
+    b.append(" x");
+    doc.add(newField("field", b.toString(), Field.Store.NO, Field.Index.ANALYZED));
+    writer.addDocument(doc);
+    writer.close();
+
+    IndexReader reader = IndexReader.open(dir, true);
+    Term t = new Term("field", "x");
+    assertEquals(1, reader.docFreq(t));
+    reader.close();
+    dir.close();
+  }
+
+  // LUCENE-1084: test user-specified field length
+  public void testUserSpecifiedMaxFieldLength() throws IOException {
+    Directory dir = newDirectory();
+
+    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
+        TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)));
+    writer.setMaxFieldLength(100000);
+
+    Document doc = new Document();
+    StringBuilder b = new StringBuilder();
+    for(int i=0;i<10000;i++)
+      b.append(" a");
+    b.append(" x");
+    doc.add(newField("field", b.toString(), Field.Store.NO, Field.Index.ANALYZED));
+    writer.addDocument(doc);
+    writer.close();
+
+    IndexReader reader = IndexReader.open(dir, true);
+    Term t = new Term("field", "x");
+    assertEquals(1, reader.docFreq(t));
+    reader.close();
+    dir.close();
+  }
+
+  // LUCENE-1179
+  public void testEmptyFieldName() throws IOException {
+    Directory dir = newDirectory();
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    Document doc = new Document();
+    doc.add(newField("", "a b c", Field.Store.NO, Field.Index.ANALYZED));
+    writer.addDocument(doc);
+    writer.close();
+    dir.close();
+  }
+
+
+
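+  // Test-only IndexWriter subclass that records whether the doBeforeFlush/doAfterFlush
+  // hooks were invoked; exercised by testDoBeforeAfterFlush below.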
+  private static final class MockIndexWriter extends IndexWriter {
+
+    public MockIndexWriter(Directory dir, IndexWriterConfig conf) throws IOException {
+      super(dir, conf);
+    }
+
+    boolean afterWasCalled;
+    boolean beforeWasCalled;
+
+    @Override
+    public void doAfterFlush() {
+      afterWasCalled = true;
+    }
+    
+    @Override
+    protected void doBeforeFlush() throws IOException {
+      beforeWasCalled = true;
+    }
+  }
+  
+
+  // LUCENE-1222
+  public void testDoBeforeAfterFlush() throws IOException {
+    Directory dir = newDirectory();
+    MockIndexWriter w = new MockIndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    Document doc = new Document();
+    doc.add(newField("field", "a field", Field.Store.YES,
+                      Field.Index.ANALYZED));
+    w.addDocument(doc);
+    w.commit();
+    assertTrue(w.beforeWasCalled);
+    assertTrue(w.afterWasCalled);
+    w.beforeWasCalled = false;
+    w.afterWasCalled = false;
+    w.deleteDocuments(new Term("field", "field"));
+    w.commit();
+    assertTrue(w.beforeWasCalled);
+    assertTrue(w.afterWasCalled);
+    w.close();
+
+    IndexReader ir = IndexReader.open(dir, true);
+    assertEquals(0, ir.numDocs());
+    ir.close();
+
+    dir.close();
+  }
+
+  // LUCENE-1255
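+  // (tokens "a", "b", "c" where the first token has position increment 0; the doc must
+  //  still match the phrase query and the first term must be indexed at position 0)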
+  public void testNegativePositions() throws Throwable {
+    final TokenStream tokens = new TokenStream() {
+      final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
+      final PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class);
+      
+      final Iterator<String> terms = Arrays.asList("a","b","c").iterator();
+      boolean first = true;
+      
+      @Override
+      public boolean incrementToken() {
+        if (!terms.hasNext()) return false;
+        clearAttributes();
+        termAtt.append(terms.next());
+        posIncrAtt.setPositionIncrement(first ? 0 : 1);
+        first = false;
+        return true;
+      }
+    };
+
+    Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    Document doc = new Document();
+    doc.add(new Field("field", tokens));
+    w.addDocument(doc);
+    w.commit();
+
+    IndexSearcher s = new IndexSearcher(dir, false);
+    PhraseQuery pq = new PhraseQuery();
+    pq.add(new Term("field", "a"));
+    pq.add(new Term("field", "b"));
+    pq.add(new Term("field", "c"));
+    ScoreDoc[] hits = s.search(pq, null, 1000).scoreDocs;
+    assertEquals(1, hits.length);
+
+    Query q = new SpanTermQuery(new Term("field", "a"));
+    hits = s.search(q, null, 1000).scoreDocs;
+    assertEquals(1, hits.length);
+    TermPositions tps = s.getIndexReader().termPositions(new Term("field", "a"));
+    assertTrue(tps.next());
+    assertEquals(1, tps.freq());
+    assertEquals(0, tps.nextPosition());
+    w.close();
+
+    s.close();
+    dir.close();
+  }
+
+  // LUCENE-1219
+  public void testBinaryFieldOffsetLength() throws IOException {
+    Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    byte[] b = new byte[50];
+    for(int i=0;i<50;i++)
+      b[i] = (byte) (i+77);
+    
+    Document doc = new Document();
+    Field f = new Field("binary", b, 10, 17);
+    byte[] bx = f.getBinaryValue();
+    assertTrue(bx != null);
+    assertEquals(50, bx.length);
+    assertEquals(10, f.getBinaryOffset());
+    assertEquals(17, f.getBinaryLength());
+    doc.add(f);
+    w.addDocument(doc);
+    w.close();
+
+    IndexReader ir = IndexReader.open(dir, true);
+    doc = ir.document(0);
+    f = doc.getField("binary");
+    b = f.getBinaryValue();
+    assertTrue(b != null);
+    assertEquals(17, b.length);
+    assertEquals(87, b[0]);
+    ir.close();
+    dir.close();
+  }
+
+  // LUCENE-2529
+  public void testPositionIncrementGapEmptyField() throws Exception {
+    Directory dir = newDirectory();
+    Analyzer analyzer = new Analyzer(){
+      Analyzer a = new WhitespaceAnalyzer( TEST_VERSION_CURRENT );
+      @Override
+      public TokenStream tokenStream(String fieldName, Reader reader){
+        return a.tokenStream(fieldName, reader);
+      }
+      @Override
+      public int getPositionIncrementGap(String fieldName) {
+        return 100;
+      }
+    };
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( 
+        TEST_VERSION_CURRENT, analyzer));
+    Document doc = new Document();
+    Field f = newField("field", "", Field.Store.NO,
+                        Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS);
+    Field f2 = newField("field", "crunch man", Field.Store.NO,
+        Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS);
+    doc.add(f);
+    doc.add(f2);
+    w.addDocument(doc);
+    w.close();
+
+    IndexReader r = IndexReader.open(dir, true);
+    TermPositionVector tpv = ((TermPositionVector) r.getTermFreqVector(0, "field"));
+    int[] poss = tpv.getTermPositions(0);
+    assertEquals(1, poss.length);
+    assertEquals(100, poss[0]);
+    poss = tpv.getTermPositions(1);
+    assertEquals(1, poss.length);
+    assertEquals(101, poss[0]);
+    r.close();
+    dir.close();
+  }
+
+
+  // LUCENE-1468 -- make sure opening an IndexWriter with
+  // create=true does not remove non-index files
+  
+  public void testOtherFiles() throws Throwable {
+    Directory dir = newDirectory();
+    try {
+      // Create my own random file:
+      IndexOutput out = dir.createOutput("myrandomfile");
+      out.writeByte((byte) 42);
+      out.close();
+
+      new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))).close();
+
+      assertTrue(dir.fileExists("myrandomfile"));
+
+      // Make sure this does not copy myrandomfile:
+      Directory dir2 = new MockDirectoryWrapper(random, new RAMDirectory(dir));
+      assertTrue(!dir2.fileExists("myrandomfile"));
+      dir2.close();
+    } finally {
+      dir.close();
+    }
+  }
+
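+  // addIndexes(IndexReader...) using a reader and its clone from a second directory;
+  // the merged index must end up with all 5 documents.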
+  public void testDeadlock() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(2));
+    Document doc = new Document();
+    doc.add(newField("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES,
+                      Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+    writer.addDocument(doc);
+    writer.addDocument(doc);
+    writer.addDocument(doc);
+    writer.commit();
+    // index has 2 segments
+
+    Directory dir2 = newDirectory();
+    IndexWriter writer2 = new IndexWriter(dir2, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    writer2.addDocument(doc);
+    writer2.close();
+
+    IndexReader r1 = IndexReader.open(dir2, true);
+    IndexReader r2 = (IndexReader) r1.clone();
+    writer.addIndexes(new IndexReader[] {r1, r2});
+    writer.close();
+
+    IndexReader r3 = IndexReader.open(dir, true);
+    assertEquals(5, r3.numDocs());
+    r3.close();
+
+    r1.close();
+    r2.close();
+
+    dir2.close();
+    dir.close();
+  }
+
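+  // Indexing thread used by testThreadInterruptDeadlock: it repeatedly builds a small
+  // index and, once allowInterrupt is set, tolerates Thread.interrupt() arriving at
+  // arbitrary points, then verifies the index is still readable and consistent.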
+  private class IndexerThreadInterrupt extends Thread {
+    volatile boolean failed;
+    volatile boolean finish;
+
+    volatile boolean allowInterrupt = false;
+
+    @Override
+    public void run() {
+      // LUCENE-2239: won't work with NIOFS/MMAP
+      Directory dir = new MockDirectoryWrapper(random, new RAMDirectory()); 
+      IndexWriter w = null;
+      while(!finish) {
+        try {
+
+          while(!finish) {
+            if (w != null) {
+              w.close();
+              w = null;
+            }
+            IndexWriterConfig conf = newIndexWriterConfig( 
+                                                          TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(2);
+            w = new IndexWriter(dir, conf);
+
+            Document doc = new Document();
+            doc.add(newField("field", "some text contents", Field.Store.YES, Field.Index.ANALYZED));
+            for(int i=0;i<100;i++) {
+              w.addDocument(doc);
+              if (i%10 == 0) {
+                w.commit();
+              }
+            }
+            w.close();
+            w = null;
+            _TestUtil.checkIndex(dir);
+            IndexReader.open(dir, true).close();
+
+            // Strangely, if we interrupt a thread before
+            // all classes are loaded, the class loader
+            // seems to do scary things with the interrupt
+            // status.  In Java 1.5, it'll throw an
+            // incorrect ClassNotFoundException.  In Java
+            // 1.6, it'll silently clear the interrupt.
+            // So, on first iteration through here we
+            // don't open ourselves up for interrupts
+            // until we've done the above loop.
+            allowInterrupt = true;
+          }
+        } catch (ThreadInterruptedException re) {
+          if (VERBOSE) {
+            System.out.println("TEST: got interrupt");
+            re.printStackTrace(System.out);
+          }
+          Throwable e = re.getCause();
+          assertTrue(e instanceof InterruptedException);
+          if (finish) {
+            break;
+          }
+        } catch (Throwable t) {
+          System.out.println("FAILED; unexpected exception");
+          t.printStackTrace(System.out);
+          failed = true;
+          break;
+        }
+      }
+
+      if (!failed) {
+        // clear interrupt state:
+        Thread.interrupted();
+        if (w != null) {
+          try {
+            w.rollback();
+          } catch (IOException ioe) {
+            throw new RuntimeException(ioe);
+          }
+        }
+
+        try {
+          _TestUtil.checkIndex(dir);
+        } catch (Exception e) {
+          failed = true;
+          System.out.println("CheckIndex FAILED: unexpected exception");
+          e.printStackTrace(System.out);
+        }
+        try {
+          IndexReader r = IndexReader.open(dir, true);
+          //System.out.println("doc count=" + r.numDocs());
+          r.close();
+        } catch (Exception e) {
+          failed = true;
+          System.out.println("IndexReader.open FAILED: unexpected exception");
+          e.printStackTrace(System.out);
+        }
+      }
+      try { 
+        dir.close();
+      } catch (IOException e) { 
+        throw new RuntimeException(e); 
+      }
+    }
+  }
+
+  public void testThreadInterruptDeadlock() throws Exception {
+    IndexerThreadInterrupt t = new IndexerThreadInterrupt();
+    t.setDaemon(true);
+    t.start();
+
+    // Force class loader to load ThreadInterruptedException
+    // up front... else we can see a false failure if 2nd
+    // interrupt arrives while class loader is trying to
+    // init this class (in servicing a first interrupt):
+    assertTrue(new ThreadInterruptedException(new InterruptedException()).getCause() instanceof InterruptedException);
+    
+    // issue 100 interrupts to child thread
+    int i = 0;
+    while(i < 100) {
+      Thread.sleep(10);
+      if (t.allowInterrupt) {
+        i++;
+        t.interrupt();
+      }
+      if (!t.isAlive()) {
+        break;
+      }
+    }
+    t.finish = true;
+    t.join();
+    assertFalse(t.failed);
+  }
+
+
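+  // Mixes a binary stored field carrying a custom token stream with a plain string field,
+  // then verifies both the stored values and the indexed terms survive flush, merge and optimize.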
+  public void testIndexStoreCombos() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    byte[] b = new byte[50];
+    for(int i=0;i<50;i++)
+      b[i] = (byte) (i+77);
+
+    Document doc = new Document();
+    Field f = new Field("binary", b, 10, 17);
+    f.setTokenStream(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("doc1field1")));
+    Field f2 = newField("string", "value", Field.Store.YES,Field.Index.ANALYZED);
+    f2.setTokenStream(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("doc1field2")));
+    doc.add(f);
+    doc.add(f2);
+    w.addDocument(doc);
+    
+    // add 2 docs to test in-memory merging
+    f.setTokenStream(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("doc2field1")));
+    f2.setTokenStream(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("doc2field2")));
+    w.addDocument(doc);
+  
+    // force segment flush so we can force a segment merge with doc3 later.
+    w.commit();
+
+    f.setTokenStream(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("doc3field1")));
+    f2.setTokenStream(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("doc3field2")));
+
+    w.addDocument(doc);
+    w.commit();
+    w.optimize();   // force segment merge.
+    w.close();
+
+    IndexReader ir = IndexReader.open(dir, true);
+    doc = ir.document(0);
+    f = doc.getField("binary");
+    b = f.getBinaryValue();
+    assertTrue(b != null);
+    assertEquals(17, b.length);
+    assertEquals(87, b[0]);
+
+    assertTrue(ir.document(0).getFieldable("binary").isBinary());
+    assertTrue(ir.document(1).getFieldable("binary").isBinary());
+    assertTrue(ir.document(2).getFieldable("binary").isBinary());
+    
+    assertEquals("value", ir.document(0).get("string"));
+    assertEquals("value", ir.document(1).get("string"));
+    assertEquals("value", ir.document(2).get("string"));
+
+
+    // test that the terms were indexed.
+    assertTrue(ir.termDocs(new Term("binary","doc1field1")).next());
+    assertTrue(ir.termDocs(new Term("binary","doc2field1")).next());
+    assertTrue(ir.termDocs(new Term("binary","doc3field1")).next());
+    assertTrue(ir.termDocs(new Term("string","doc1field2")).next());
+    assertTrue(ir.termDocs(new Term("string","doc2field2")).next());
+    assertTrue(ir.termDocs(new Term("string","doc3field2")).next());
+
+    ir.close();
+    dir.close();
+
+  }
+
+  // LUCENE-1727: make sure doc fields are stored in order
+  public void testStoredFieldsOrder() throws Throwable {
+    Directory d = newDirectory();
+    IndexWriter w = new IndexWriter(d, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    Document doc = new Document();
+    doc.add(newField("zzz", "a b c", Field.Store.YES, Field.Index.NO));
+    doc.add(newField("aaa", "a b c", Field.Store.YES, Field.Index.NO));
+    doc.add(newField("zzz", "1 2 3", Field.Store.YES, Field.Index.NO));
+    w.addDocument(doc);
+    IndexReader r = w.getReader();
+    doc = r.document(0);
+    Iterator<Fieldable> it = doc.getFields().iterator();
+    assertTrue(it.hasNext());
+    Field f = (Field) it.next();
+    assertEquals(f.name(), "zzz");
+    assertEquals(f.stringValue(), "a b c");
+
+    assertTrue(it.hasNext());
+    f = (Field) it.next();
+    assertEquals(f.name(), "aaa");
+    assertEquals(f.stringValue(), "a b c");
+
+    assertTrue(it.hasNext());
+    f = (Field) it.next();
+    assertEquals(f.name(), "zzz");
+    assertEquals(f.stringValue(), "1 2 3");
+    assertFalse(it.hasNext());
+    r.close();
+    w.close();
+    d.close();
+  }
+
+  public void testNoDocsIndex() throws Throwable {
+    Directory dir = newDirectory();
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( 
+        TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    ByteArrayOutputStream bos = new ByteArrayOutputStream(1024);
+    writer.setInfoStream(new PrintStream(bos));
+    writer.addDocument(new Document());
+    writer.close();
+
+    dir.close();
+  }
+
+  public void testDeleteUnusedFiles() throws Exception {
+
+    for(int iter=0;iter<2;iter++) {
+      Directory dir = newDirectory();
+
+      LogMergePolicy mergePolicy = newLogMergePolicy(true);
+      mergePolicy.setNoCFSRatio(1); // This test expects all of its segments to be in CFS
+
+      IndexWriter w = new IndexWriter(
+          dir,
+          newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+              setMergePolicy(mergePolicy)
+      );
+      Document doc = new Document();
+      doc.add(newField("field", "go", Field.Store.NO, Field.Index.ANALYZED));
+      w.addDocument(doc);
+      IndexReader r;
+      if (iter == 0) {
+        // use NRT
+        r = w.getReader();
+      } else {
+        // don't use NRT
+        w.commit();
+        r = IndexReader.open(dir);
+      }
+
+      List<String> files = Arrays.asList(dir.listAll());
+      assertTrue(files.contains("_0.cfs"));
+      w.addDocument(doc);
+      w.optimize();
+      if (iter == 1) {
+        w.commit();
+      }
+      IndexReader r2 = r.reopen();
+      assertTrue(r != r2);
+      files = Arrays.asList(dir.listAll());
+
+      // NOTE: here we rely on "Windows" behavior, ie, even
+      // though IW wanted to delete _0.cfs since it was
+      // optimized away, because we have a reader open
+      // against this file, it should still be here:
+      assertTrue(files.contains("_0.cfs"));
+      // optimize created this
+      //assertTrue(files.contains("_2.cfs"));
+      w.deleteUnusedFiles();
+
+      files = Arrays.asList(dir.listAll());
+      // r still holds this file open
+      assertTrue(files.contains("_0.cfs"));
+      //assertTrue(files.contains("_2.cfs"));
+
+      r.close();
+      if (iter == 0) {
+        // on closing NRT reader, it calls writer.deleteUnusedFiles
+        files = Arrays.asList(dir.listAll());
+        assertFalse(files.contains("_0.cfs"));
+      } else {
+        // now writer can remove it
+        w.deleteUnusedFiles();
+        files = Arrays.asList(dir.listAll());
+        assertFalse(files.contains("_0.cfs"));
+      }
+      //assertTrue(files.contains("_2.cfs"));
+
+      w.close();
+      r2.close();
+
+      dir.close();
+    }
+  }
+
+  public void testDeleteUnusedFiles2() throws Exception {
+    // Validates that iw.deleteUnusedFiles() also deletes unused index commits
+    // when a deletion policy that holds onto commits is used.
+    Directory dir = newDirectory();
+    SnapshotDeletionPolicy sdp = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( 
+        TEST_VERSION_CURRENT, new MockAnalyzer(random))
+        .setIndexDeletionPolicy(sdp));
+    
+    // First commit
+    Document doc = new Document();
+    doc.add(newField("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
+    writer.addDocument(doc);
+    writer.commit();
+    assertEquals(1, IndexReader.listCommits(dir).size());
+
+    // Keep that commit
+    sdp.snapshot("id");
+    
+    // Second commit - now KeepOnlyLastCommit cannot delete the prev commit.
+    doc = new Document();
+    doc.add(newField("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
+    writer.addDocument(doc);
+    writer.commit();
+    assertEquals(2, IndexReader.listCommits(dir).size());
+
+    // Should delete the unreferenced commit
+    sdp.release("id");
+    writer.deleteUnusedFiles();
+    assertEquals(1, IndexReader.listCommits(dir).size());
+    
+    writer.close();
+    dir.close();
+  }
+  
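+  // Test-only IndexWriter subclass that counts flushes via the doAfterFlush hook.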
+  private static class FlushCountingIndexWriter extends IndexWriter {
+    int flushCount;
+    public FlushCountingIndexWriter(Directory dir, IndexWriterConfig iwc) throws IOException {
+      super(dir, iwc);
+    }
+    @Override
+    public void doAfterFlush() {
+      flushCount++;
+    }
+  }
+
+  public void testEmptyFSDirWithNoLock() throws Exception {
+    // Tests that if FSDir is opened w/ a NoLockFactory (or SingleInstanceLF),
+    // then IndexWriter ctor succeeds. Previously (LUCENE-2386) it failed 
+    // when listAll() was called in IndexFileDeleter.
+    Directory dir = newFSDirectory(_TestUtil.getTempDir("emptyFSDirNoLock"), NoLockFactory.getNoLockFactory());
+    new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))).close();
+    dir.close();
+  }
+
+  public void testEmptyDirRollback() throws Exception {
+    // Tests that if IW is created over an empty Directory, some documents are
+    // indexed, flushed (but not committed) and then IW rolls back, then no 
+    // files are left in the Directory.
+    Directory dir = newDirectory();
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( 
+        TEST_VERSION_CURRENT, new MockAnalyzer(random))
+                                         .setMaxBufferedDocs(2).setMergePolicy(newLogMergePolicy()));
+    String[] files = dir.listAll();
+
+    writer.setInfoStream(VERBOSE ? System.out : null);
+
+    // Creating over empty dir should not create any files,
+    // or, at most the write.lock file
+    final int extraFileCount;
+    if (files.length == 1) {
+      assertTrue(files[0].endsWith("write.lock"));
+      extraFileCount = 1;
+    } else {
+      assertEquals(0, files.length);
+      extraFileCount = 0;
+    }
+
+    Document doc = new Document();
+    // create as many files as possible
+    doc.add(newField("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
+    writer.addDocument(doc);
+    // Adding just one document does not call flush yet.
+    assertEquals("only the stored and term vector files should exist in the directory", 5 + extraFileCount, dir.listAll().length);
+    
+    doc = new Document();
+    doc.add(newField("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
+    writer.addDocument(doc);
+
+    // The second document should cause a flush.
+    assertTrue("flush should have occurred and files should have been created", dir.listAll().length > 5 + extraFileCount);
+
+    // After rollback, IW should remove all files
+    writer.rollback();
+    assertEquals("no files should exist in the directory after rollback", 0, dir.listAll().length);
+
+    // Since we rolled-back above, that close should be a no-op
+    writer.close();
+    assertEquals("expected a no-op close after IW.rollback()", 0, dir.listAll().length);
+    dir.close();
+  }
+
+  public void testNoSegmentFile() throws IOException {
+    Directory dir = newDirectory();
+    dir.setLockFactory(NoLockFactory.getNoLockFactory());
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( 
+        TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(2));
+    
+    Document doc = new Document();
+    doc.add(newField("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
+    w.addDocument(doc);
+    w.addDocument(doc);
+    IndexWriter w2 = new IndexWriter(dir, newIndexWriterConfig( 
+        TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(2)
+        .setOpenMode(OpenMode.CREATE));
+    
+    w2.close();
+    w.rollback();
+    dir.close();
+  }
+
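+  // Indexes random unicode stored fields with intermittent deletes, then verifies every
+  // surviving doc's stored values round-trip, both before and after optimize.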
+  public void testRandomStoredFields() throws IOException {
+    Directory dir = newDirectory();
+    Random rand = random;
+    RandomIndexWriter w = new RandomIndexWriter(rand, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(_TestUtil.nextInt(rand, 5, 20)));
+    //w.w.setInfoStream(System.out);
+    //w.w.setUseCompoundFile(false);
+    if (VERBOSE) {
+      w.w.setInfoStream(System.out);
+    }
+    final int docCount = atLeast(200);
+    final int fieldCount = _TestUtil.nextInt(rand, 1, 5);
+      
+    final List<Integer> fieldIDs = new ArrayList<Integer>();
+
+    Field idField = newField("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
+
+    for(int i=0;i<fieldCount;i++) {
+      fieldIDs.add(i);
+    }
+
+    final Map<String,Document> docs = new HashMap<String,Document>();
+    
+    if (VERBOSE) {
+      System.out.println("TEST: build index docCount=" + docCount);
+    }
+
+    for(int i=0;i<docCount;i++) {
+      Document doc = new Document();
+      doc.add(idField);
+      final String id = ""+i;
+      idField.setValue(id);
+      docs.put(id, doc);
+
+      for(int field: fieldIDs) {
+        final String s;
+        if (rand.nextInt(4) != 3) {
+          s = _TestUtil.randomUnicodeString(rand, 1000);
+          doc.add(newField("f"+field, s, Field.Store.YES, Field.Index.NO));
+        } else {
+          s = null;
+        }
+      }
+      w.addDocument(doc);
+      if (rand.nextInt(50) == 17) {
+        // mixup binding of field name -> Number every so often
+        Collections.shuffle(fieldIDs);
+      }
+      if (rand.nextInt(5) == 3 && i > 0) {
+        final String delID = ""+rand.nextInt(i);
+        if (VERBOSE) {
+          System.out.println("TEST: delete doc " + delID);
+        }
+        w.deleteDocuments(new Term("id", delID));
+        docs.remove(delID);
+      }
+    }
+
+    if (VERBOSE) {
+      System.out.println("TEST: " + docs.size() + " docs in index; now load fields");
+    }
+    if (docs.size() > 0) {
+      String[] idsList = docs.keySet().toArray(new String[docs.size()]);
+
+      for(int x=0;x<2;x++) {
+        IndexReader r = w.getReader();
+        IndexSearcher s = newSearcher(r);
+
+        if (VERBOSE) {
+          System.out.println("TEST: cycle x=" + x + " r=" + r);
+        }
+
+        int num = atLeast(1000);
+        for(int iter=0;iter<num;iter++) {
+          String testID = idsList[rand.nextInt(idsList.length)];
+          TopDocs hits = s.search(new TermQuery(new Term("id", testID)), 1);
+          assertEquals(1, hits.totalHits);
+          Document doc = r.document(hits.scoreDocs[0].doc);
+          Document docExp = docs.get(testID);
+          for(int i=0;i<fieldCount;i++) {
+            assertEquals("doc " + testID + ", field f" + fieldCount + " is wrong", docExp.get("f"+i),  doc.get("f"+i));
+          }
+        }
+        s.close();
+        r.close();
+        w.optimize();
+      }
+    }
+    w.close();
+    dir.close();
+  }
+
+  public void testNoUnwantedTVFiles() throws Exception {
+
+    Directory dir = newDirectory();
+    IndexWriter indexWriter = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setRAMBufferSizeMB(0.01).setMergePolicy(newLogMergePolicy()));
+    ((LogMergePolicy) indexWriter.getConfig().getMergePolicy()).setUseCompoundFile(false);
+
+    String BIG="alskjhlaksjghlaksjfhalksvjepgjioefgjnsdfjgefgjhelkgjhqewlrkhgwlekgrhwelkgjhwelkgrhwlkejg";
+    BIG=BIG+BIG+BIG+BIG;
+
+    for (int i=0; i<2; i++) {
+      Document doc = new Document();
+      doc.add(new Field("id", Integer.toString(i)+BIG, Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS));
+      doc.add(new Field("str", Integer.toString(i)+BIG, Field.Store.YES, Field.Index.NOT_ANALYZED));
+      doc.add(new Field("str2", Integer.toString(i)+BIG, Field.Store.YES, Field.Index.ANALYZED));
+      doc.add(new Field("str3", Integer.toString(i)+BIG, Field.Store.YES, Field.Index.ANALYZED_NO_NORMS));
+      indexWriter.addDocument(doc);
+    }
+
+    indexWriter.close();
+
+    assertNoUnreferencedFiles(dir, "no tv files");
+    String[] files = dir.listAll();
+    for(String file : files) {
+      assertTrue(!file.endsWith(IndexFileNames.VECTORS_FIELDS_EXTENSION));
+      assertTrue(!file.endsWith(IndexFileNames.VECTORS_INDEX_EXTENSION));
+      assertTrue(!file.endsWith(IndexFileNames.VECTORS_DOCUMENTS_EXTENSION));
+    }
+
+    dir.close();
+  }
+
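+  // Trivial analyzer/tokenizer pair that reads the entire input and splits it on
+  // single spaces, producing fully predictable tokens for tests.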
+  static final class StringSplitAnalyzer extends Analyzer {
+    @Override
+    public TokenStream tokenStream(String fieldName, Reader reader) {
+      return new StringSplitTokenizer(reader);
+    }
+  }
+
+  private static class StringSplitTokenizer extends Tokenizer {
+    private String[] tokens;
+    private int upto;
+    private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
+
+    public StringSplitTokenizer(Reader r) {
+      try {
+        reset(r);
+      } catch (IOException e) {
+        throw new RuntimeException(e);
+      }
+    }
+
+    @Override
+    public final boolean incrementToken() throws IOException {
+      clearAttributes();      
+      if (upto < tokens.length) {
+        termAtt.setEmpty();
+        termAtt.append(tokens[upto]);
+        upto++;
+        return true;
+      } else {
+        return false;
+      }
+    }
+
+    @Override
+    public void reset(Reader input) throws IOException {
+       this.upto = 0;
+       final StringBuilder b = new StringBuilder();
+       final char[] buffer = new char[1024];
+       int n;
+       while ((n = input.read(buffer)) != -1) {
+         b.append(buffer, 0, n);
+       }
+       this.tokens = b.toString().split(" ");
+    }
+  }
+
+  // LUCENE-3183
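+  // (with a term index interval of 1, term lookups against an empty field name must not throw)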
+  public void testEmptyFieldNameTIIOne() throws IOException {
+    Directory dir = newDirectory();
+    IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
+    iwc.setTermIndexInterval(1);
+    iwc.setReaderTermsIndexDivisor(1);
+    IndexWriter writer = new IndexWriter(dir, iwc);
+    Document doc = new Document();
+    doc.add(newField("", "a b c", Field.Store.NO, Field.Index.ANALYZED));
+    writer.addDocument(doc);
+    final IndexReader r = IndexReader.open(writer, true);
+    writer.close();
+    r.terms(new Term("", ""));
+    r.terms(new Term("", ""));
+    r.terms(new Term("", "a"));
+    r.terms(new Term("", ""));
+    r.close();
+    dir.close();
+  }
+
+  public void testDeleteAllNRTLeftoverFiles() throws Exception {
+
+    Directory d = new MockDirectoryWrapper(random, new RAMDirectory());
+    IndexWriter w = new IndexWriter(d, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    Document doc = new Document();
+    for(int i = 0; i < 20; i++) {
+      for(int j = 0; j < 100; ++j) {
+        w.addDocument(doc);
+      }
+      w.commit();
+      IndexReader.open(w, true).close();
+
+      w.deleteAll();
+      w.commit();
+
+      // Make sure we accumulate no files except for empty
+      // segments_N and segments.gen:
+      assertTrue(d.listAll().length <= 2);
+    }
+
+    w.close();
+    d.close();
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestIndexWriterCommit.java b/lucene/backwards/src/test/org/apache/lucene/index/TestIndexWriterCommit.java
new file mode 100644
index 0000000..563d1c1
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestIndexWriterCommit.java
@@ -0,0 +1,668 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.io.Reader;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.MockFixedLengthPayloadFilter;
+import org.apache.lucene.analysis.MockTokenizer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.MockDirectoryWrapper;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util._TestUtil;
+
+public class TestIndexWriterCommit extends LuceneTestCase {
+  /*
+   * Simple test for "commit on close": open writer then
+   * add a bunch of docs, making sure reader does not see
+   * these docs until writer is closed.
+   */
+  public void testCommitOnClose() throws IOException {
+      Directory dir = newDirectory();
+      IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+      for (int i = 0; i < 14; i++) {
+        TestIndexWriter.addDoc(writer);
+      }
+      writer.close();
+
+      Term searchTerm = new Term("content", "aaa");
+      IndexSearcher searcher = new IndexSearcher(dir, false);
+      ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
+      assertEquals("first number of hits", 14, hits.length);
+      searcher.close();
+
+      IndexReader reader = IndexReader.open(dir, true);
+
+      writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+      for(int i=0;i<3;i++) {
+        for(int j=0;j<11;j++) {
+          TestIndexWriter.addDoc(writer);
+        }
+        searcher = new IndexSearcher(dir, false);
+        hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
+        assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
+        searcher.close();
+        assertTrue("reader should have still been current", reader.isCurrent());
+      }
+
+      // Now, close the writer:
+      writer.close();
+      assertFalse("reader should not be current now", reader.isCurrent());
+
+      searcher = new IndexSearcher(dir, false);
+      hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
+      assertEquals("reader did not see changes after writer was closed", 47, hits.length);
+      searcher.close();
+      reader.close();
+      dir.close();
+  }
+
+  /*
+   * Simple test for "commit on close": open writer, then
+   * add a bunch of docs, making sure reader does not see
+   * them until writer has closed.  Then instead of
+   * closing the writer, call abort and verify reader sees
+   * nothing was added.  Then verify we can open the index
+   * and add docs to it.
+   */
+  public void testCommitOnCloseAbort() throws IOException {
+    MockDirectoryWrapper dir = newDirectory();
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(10));
+    for (int i = 0; i < 14; i++) {
+      TestIndexWriter.addDoc(writer);
+    }
+    writer.close();
+
+    Term searchTerm = new Term("content", "aaa");
+    IndexSearcher searcher = new IndexSearcher(dir, false);
+    ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
+    assertEquals("first number of hits", 14, hits.length);
+    searcher.close();
+
+    writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))
+      .setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(10));
+    for(int j=0;j<17;j++) {
+      TestIndexWriter.addDoc(writer);
+    }
+    // Delete all docs:
+    writer.deleteDocuments(searchTerm);
+
+    searcher = new IndexSearcher(dir, false);
+    hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
+    assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
+    searcher.close();
+
+    // Now, close the writer:
+    writer.rollback();
+
+    TestIndexWriter.assertNoUnreferencedFiles(dir, "unreferenced files remain after rollback()");
+
+    searcher = new IndexSearcher(dir, false);
+    hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
+    assertEquals("saw changes after writer.abort", 14, hits.length);
+    searcher.close();
+
+    // Now make sure we can re-open the index, add docs,
+    // and all is good:
+    writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))
+      .setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(10));
+
+    // On abort, writer in fact may write to the same
+    // segments_N file:
+    dir.setPreventDoubleWrite(false);
+
+    for(int i=0;i<12;i++) {
+      for(int j=0;j<17;j++) {
+        TestIndexWriter.addDoc(writer);
+      }
+      searcher = new IndexSearcher(dir, false);
+      hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
+      assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
+      searcher.close();
+    }
+
+    writer.close();
+    searcher = new IndexSearcher(dir, false);
+    hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
+    assertEquals("didn't see changes after close", 218, hits.length);
+    searcher.close();
+
+    dir.close();
+  }
+
+  /*
+   * Verify that a writer with "commit on close" indeed
+   * cleans up the temp segments created after opening
+   * that are not referenced by the starting segments
+   * file.  We check this by using MockDirectoryWrapper to
+   * measure max temp disk space used.
+   */
+  public void testCommitOnCloseDiskUsage() throws IOException {
+    MockDirectoryWrapper dir = newDirectory();
+    Analyzer analyzer;
+    if (random.nextBoolean()) {
+      // no payloads
+      analyzer = new Analyzer() {
+        @Override
+        public TokenStream tokenStream(String fieldName, Reader reader) {
+          return new MockTokenizer(reader, MockTokenizer.WHITESPACE, true);
+        }
+      };
+    } else {
+      // fixed length payloads
+      final int length = random.nextInt(200);
+      analyzer = new Analyzer() {
+        @Override
+        public TokenStream tokenStream(String fieldName, Reader reader) {
+          return new MockFixedLengthPayloadFilter(random,
+              new MockTokenizer(reader, MockTokenizer.WHITESPACE, true),
+              length);
+        }
+      };
+    }
+    
+    IndexWriter writer  = new IndexWriter(
+        dir,
+        newIndexWriterConfig( TEST_VERSION_CURRENT, analyzer).
+            setMaxBufferedDocs(10).
+            setReaderPooling(false).
+            setMergePolicy(newLogMergePolicy(10))
+    );
+    for(int j=0;j<30;j++) {
+      TestIndexWriter.addDocWithIndex(writer, j);
+    }
+    writer.close();
+    dir.resetMaxUsedSizeInBytes();
+
+    dir.setTrackDiskUsage(true);
+    long startDiskUsage = dir.getMaxUsedSizeInBytes();
+    writer = new IndexWriter(
+        dir,
+        newIndexWriterConfig( TEST_VERSION_CURRENT, analyzer).
+            setOpenMode(OpenMode.APPEND).
+            setMaxBufferedDocs(10).
+            setMergeScheduler(new SerialMergeScheduler()).
+            setReaderPooling(false).
+            setMergePolicy(newLogMergePolicy(10))
+    );
+    for(int j=0;j<1470;j++) {
+      TestIndexWriter.addDocWithIndex(writer, j);
+    }
+    long midDiskUsage = dir.getMaxUsedSizeInBytes();
+    dir.resetMaxUsedSizeInBytes();
+    writer.optimize();
+    writer.close();
+
+    IndexReader.open(dir, true).close();
+
+    long endDiskUsage = dir.getMaxUsedSizeInBytes();
+
+    // Ending index is 50X as large as starting index; due
+    // to 3X disk usage normally we allow 150X max
+    // transient usage.  If something is wrong w/ deleter
+    // and it doesn't delete intermediate segments then it
+    // will exceed this 150X:
+    // System.out.println("start " + startDiskUsage + "; mid " + midDiskUsage + ";end " + endDiskUsage);
+    assertTrue("writer used too much space while adding documents: mid=" + midDiskUsage + " start=" + startDiskUsage + " end=" + endDiskUsage + " max=" + (startDiskUsage*150),
+               midDiskUsage < 150*startDiskUsage);
+    assertTrue("writer used too much space after close: endDiskUsage=" + endDiskUsage + " startDiskUsage=" + startDiskUsage + " max=" + (startDiskUsage*150),
+               endDiskUsage < 150*startDiskUsage);
+    dir.close();
+  }
+
+
+  /*
+   * Verify that calling optimize when writer is open for
+   * "commit on close" works correctly both for rollback()
+   * and close().
+   */
+  public void testCommitOnCloseOptimize() throws IOException {
+    MockDirectoryWrapper dir = newDirectory();
+    // Must disable throwing exc on double-write: this
+    // test uses IW.rollback which easily results in
+    // writing to same file more than once
+    dir.setPreventDoubleWrite(false);
+    IndexWriter writer = new IndexWriter(
+        dir,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+            setMaxBufferedDocs(10).
+            setMergePolicy(newLogMergePolicy(10))
+    );
+    for(int j=0;j<17;j++) {
+      TestIndexWriter.addDocWithIndex(writer, j);
+    }
+    writer.close();
+
+    writer  = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
+    writer.optimize();
+
+    if (VERBOSE) {
+      writer.setInfoStream(System.out);
+    }
+
+    // Open a reader before closing (committing) the writer:
+    IndexReader reader = IndexReader.open(dir, true);
+
+    // Reader should see index as unoptimized at this
+    // point:
+    assertFalse("Reader incorrectly sees that the index is optimized", reader.isOptimized());
+    reader.close();
+
+    // Abort the writer:
+    writer.rollback();
+    TestIndexWriter.assertNoUnreferencedFiles(dir, "aborted writer after optimize");
+
+    // Open a reader after aborting writer:
+    reader = IndexReader.open(dir, true);
+
+    // Reader should still see index as unoptimized:
+    assertFalse("Reader incorrectly sees that the index is optimized", reader.isOptimized());
+    reader.close();
+
+    if (VERBOSE) {
+      System.out.println("TEST: do real optimize");
+    }
+    writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
+    if (VERBOSE) {
+      writer.setInfoStream(System.out);
+    }
+    writer.optimize();
+    writer.close();
+
+    if (VERBOSE) {
+      System.out.println("TEST: writer closed");
+    }
+    TestIndexWriter.assertNoUnreferencedFiles(dir, "closed writer after optimize");
+
+    // Open a reader after the writer has closed (committing the optimize):
+    reader = IndexReader.open(dir, true);
+
+    // Reader should now see the index as optimized:
+    assertTrue("Reader incorrectly sees that the index is unoptimized", reader.isOptimized());
+    reader.close();
+    dir.close();
+  }
+  
+  // LUCENE-2095: make sure with multiple threads commit
+  // doesn't return until all changes are in fact in the
+  // index
+  public void testCommitThreadSafety() throws Throwable {
+    final int NUM_THREADS = 5;
+    final double RUN_SEC = 0.5;
+    final Directory dir = newDirectory();
+    final RandomIndexWriter w = new RandomIndexWriter(random, dir, newIndexWriterConfig(
+        TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
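+    // Keep the number of segments (and thus open files) small, since this test
+    // opens many readers across several threads: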
+    _TestUtil.reduceOpenFiles(w.w);
+    w.commit();
+    final AtomicBoolean failed = new AtomicBoolean();
+    Thread[] threads = new Thread[NUM_THREADS];
+    final long endTime = System.currentTimeMillis()+((long) (RUN_SEC*1000));
+    for(int i=0;i<NUM_THREADS;i++) {
+      final int finalI = i;
+      threads[i] = new Thread() {
+          @Override
+          public void run() {
+            try {
+              final Document doc = new Document();
+              IndexReader r = IndexReader.open(dir);
+              Field f = newField("f", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
+              doc.add(f);
+              int count = 0;
+              do {
+                if (failed.get()) break;
+                for(int j=0;j<10;j++) {
+                  final String s = finalI + "_" + String.valueOf(count++);
+                  f.setValue(s);
+                  w.addDocument(doc);
+                  w.commit();
+                  IndexReader r2 = r.reopen();
+                  assertTrue(r2 != r);
+                  r.close();
+                  r = r2;
+                  assertEquals("term=f:" + s + "; r=" + r, 1, r.docFreq(new Term("f", s)));
+                }
+              } while(System.currentTimeMillis() < endTime);
+              r.close();
+            } catch (Throwable t) {
+              failed.set(true);
+              throw new RuntimeException(t);
+            }
+          }
+        };
+      threads[i].start();
+    }
+    for(int i=0;i<NUM_THREADS;i++) {
+      threads[i].join();
+    }
+    assertFalse(failed.get());
+    w.close();
+    dir.close();
+  }
+
+  // LUCENE-1044: test writer.commit() when autoCommit=false
+  public void testForceCommit() throws IOException {
+    Directory dir = newDirectory();
+
+    IndexWriter writer = new IndexWriter(
+        dir,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+            setMaxBufferedDocs(2).
+            setMergePolicy(newLogMergePolicy(5))
+    );
+    writer.commit();
+
+    for (int i = 0; i < 23; i++)
+      TestIndexWriter.addDoc(writer);
+
+    IndexReader reader = IndexReader.open(dir, true);
+    assertEquals(0, reader.numDocs());
+    writer.commit();
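+    // reopen() returns a new reader that sees the just-committed docs, while the
+    // original reader still reflects the earlier (empty) commit point: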
+    IndexReader reader2 = reader.reopen();
+    assertEquals(0, reader.numDocs());
+    assertEquals(23, reader2.numDocs());
+    reader.close();
+
+    for (int i = 0; i < 17; i++)
+      TestIndexWriter.addDoc(writer);
+    assertEquals(23, reader2.numDocs());
+    reader2.close();
+    reader = IndexReader.open(dir, true);
+    assertEquals(23, reader.numDocs());
+    reader.close();
+    writer.commit();
+
+    reader = IndexReader.open(dir, true);
+    assertEquals(40, reader.numDocs());
+    reader.close();
+    writer.close();
+    dir.close();
+  }
+  
+  public void testFutureCommit() throws Exception {
+    Directory dir = newDirectory();
+
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE));
+    Document doc = new Document();
+    w.addDocument(doc);
+
+    // commit to "first"
+    Map<String,String> commitData = new HashMap<String,String>();
+    commitData.put("tag", "first");
+    w.commit(commitData);
+
+    // commit to "second"
+    w.addDocument(doc);
+    commitData.put("tag", "second");
+    w.commit(commitData);
+    w.close();
+
+    // open "first" with IndexWriter
+    IndexCommit commit = null;
+    for(IndexCommit c : IndexReader.listCommits(dir)) {
+      if (c.getUserData().get("tag").equals("first")) {
+        commit = c;
+        break;
+      }
+    }
+
+    assertNotNull(commit);
+
+    w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE).setIndexCommit(commit));
+
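+    // Opening the writer on the "first" commit rolls it back to that point, while
+    // NoDeletionPolicy keeps the later "second" commit alive on disk: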
+    assertEquals(1, w.numDocs());
+
+    // commit IndexWriter to "third"
+    w.addDocument(doc);
+    commitData.put("tag", "third");
+    w.commit(commitData);
+    w.close();
+
+    // make sure "second" commit is still there
+    commit = null;
+    for(IndexCommit c : IndexReader.listCommits(dir)) {
+      if (c.getUserData().get("tag").equals("second")) {
+        commit = c;
+        break;
+      }
+    }
+
+    assertNotNull(commit);
+
+    IndexReader r = IndexReader.open(commit, true);
+    assertEquals(2, r.numDocs());
+    r.close();
+
+    // open "second", w/ writeable IndexReader & commit
+    r = IndexReader.open(commit, NoDeletionPolicy.INSTANCE, false);
+    assertEquals(2, r.numDocs());
+    r.deleteDocument(0);
+    r.deleteDocument(1);
+    commitData.put("tag", "fourth");
+    r.commit(commitData);
+    r.close();
+
+    // make sure "third" commit is still there
+    commit = null;
+    for(IndexCommit c : IndexReader.listCommits(dir)) {
+      if (c.getUserData().get("tag").equals("third")) {
+        commit = c;
+        break;
+      }
+    }
+    assertNotNull(commit);
+
+    dir.close();
+  }
+  
+  public void testNoCommits() throws Exception {
+    // Tests that if we don't call commit(), the directory has 0 commits. This
+    // changed with LUCENE-2386; before that, IW would always commit on a fresh
+    // new index.
+    Directory dir = newDirectory();
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    try {
+      IndexReader.listCommits(dir);
+      fail("listCommits should have thrown an exception on an empty index");
+    } catch (IndexNotFoundException e) {
+      // that's expected!
+    }
+    // Even with no changes, closing the writer should create a commit, because it's a brand-new index.
+    writer.close();
+    assertEquals("expected 1 commits!", 1, IndexReader.listCommits(dir).size());
+    dir.close();
+  }
+  
+  // LUCENE-1274: test writer.prepareCommit()
+  public void testPrepareCommit() throws IOException {
+    Directory dir = newDirectory();
+
+    IndexWriter writer = new IndexWriter(
+        dir,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+            setMaxBufferedDocs(2).
+            setMergePolicy(newLogMergePolicy(5))
+    );
+    writer.commit();
+
+    for (int i = 0; i < 23; i++)
+      TestIndexWriter.addDoc(writer);
+
+    IndexReader reader = IndexReader.open(dir, true);
+    assertEquals(0, reader.numDocs());
+
+    writer.prepareCommit();
+
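+    // prepareCommit() is only the first phase of the two-phase commit: readers
+    // opened now must not see the pending changes until commit() is called: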
+    IndexReader reader2 = IndexReader.open(dir, true);
+    assertEquals(0, reader2.numDocs());
+
+    writer.commit();
+
+    IndexReader reader3 = reader.reopen();
+    assertEquals(0, reader.numDocs());
+    assertEquals(0, reader2.numDocs());
+    assertEquals(23, reader3.numDocs());
+    reader.close();
+    reader2.close();
+
+    for (int i = 0; i < 17; i++)
+      TestIndexWriter.addDoc(writer);
+
+    assertEquals(23, reader3.numDocs());
+    reader3.close();
+    reader = IndexReader.open(dir, true);
+    assertEquals(23, reader.numDocs());
+    reader.close();
+
+    writer.prepareCommit();
+
+    reader = IndexReader.open(dir, true);
+    assertEquals(23, reader.numDocs());
+    reader.close();
+
+    writer.commit();
+    reader = IndexReader.open(dir, true);
+    assertEquals(40, reader.numDocs());
+    reader.close();
+    writer.close();
+    dir.close();
+  }
+
+  // LUCENE-1274: test writer.prepareCommit()
+  public void testPrepareCommitRollback() throws IOException {
+    MockDirectoryWrapper dir = newDirectory();
+    dir.setPreventDoubleWrite(false);
+
+    IndexWriter writer = new IndexWriter(
+        dir,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+            setMaxBufferedDocs(2).
+            setMergePolicy(newLogMergePolicy(5))
+    );
+    writer.commit();
+
+    for (int i = 0; i < 23; i++)
+      TestIndexWriter.addDoc(writer);
+
+    IndexReader reader = IndexReader.open(dir, true);
+    assertEquals(0, reader.numDocs());
+
+    writer.prepareCommit();
+
+    IndexReader reader2 = IndexReader.open(dir, true);
+    assertEquals(0, reader2.numDocs());
+
+    writer.rollback();
+
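+    // rollback() discards the prepared but uncommitted changes, so old and new
+    // readers should continue to see an empty index: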
+    IndexReader reader3 = reader.reopen();
+    assertEquals(0, reader.numDocs());
+    assertEquals(0, reader2.numDocs());
+    assertEquals(0, reader3.numDocs());
+    reader.close();
+    reader2.close();
+
+    writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    for (int i = 0; i < 17; i++)
+      TestIndexWriter.addDoc(writer);
+
+    assertEquals(0, reader3.numDocs());
+    reader3.close();
+    reader = IndexReader.open(dir, true);
+    assertEquals(0, reader.numDocs());
+    reader.close();
+
+    writer.prepareCommit();
+
+    reader = IndexReader.open(dir, true);
+    assertEquals(0, reader.numDocs());
+    reader.close();
+
+    writer.commit();
+    reader = IndexReader.open(dir, true);
+    assertEquals(17, reader.numDocs());
+    reader.close();
+    writer.close();
+    dir.close();
+  }
+
+  // LUCENE-1274
+  public void testPrepareCommitNoChanges() throws IOException {
+    Directory dir = newDirectory();
+
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    writer.prepareCommit();
+    writer.commit();
+    writer.close();
+
+    IndexReader reader = IndexReader.open(dir, true);
+    assertEquals(0, reader.numDocs());
+    reader.close();
+    dir.close();
+  }
+  
+  // LUCENE-1382
+  public void testCommitUserData() throws IOException {
+    Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(2));
+    for(int j=0;j<17;j++)
+      TestIndexWriter.addDoc(w);
+    w.close();
+
+    assertEquals(0, IndexReader.getCommitUserData(dir).size());
+
+    IndexReader r = IndexReader.open(dir, true);
+    // commit(Map) never called for this index
+    assertEquals(0, r.getCommitUserData().size());
+    r.close();
+
+    w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(2));
+    for(int j=0;j<17;j++)
+      TestIndexWriter.addDoc(w);
+    Map<String,String> data = new HashMap<String,String>();
+    data.put("label", "test1");
+    w.commit(data);
+    w.close();
+
+    assertEquals("test1", IndexReader.getCommitUserData(dir).get("label"));
+
+    r = IndexReader.open(dir, true);
+    assertEquals("test1", r.getCommitUserData().get("label"));
+    r.close();
+
+    w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    w.optimize();
+    w.close();
+
+    assertEquals("test1", IndexReader.getCommitUserData(dir).get("label"));
+
+    dir.close();
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestIndexWriterConfig.java b/lucene/backwards/src/test/org/apache/lucene/index/TestIndexWriterConfig.java
new file mode 100644
index 0000000..3412b3d
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestIndexWriterConfig.java
@@ -0,0 +1,309 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.lang.reflect.Field;
+import java.lang.reflect.Method;
+import java.lang.reflect.Modifier;
+import java.util.HashSet;
+import java.util.Set;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.WhitespaceAnalyzer;
+import org.apache.lucene.index.DocumentsWriter.IndexingChain;
+import org.apache.lucene.index.IndexWriter.IndexReaderWarmer;
+import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.search.DefaultSimilarity;
+import org.apache.lucene.search.Similarity;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+import org.junit.Test;
+
+public class TestIndexWriterConfig extends LuceneTestCase {
+
+  private static final class MySimilarity extends DefaultSimilarity {
+    // Does not implement anything - used only for type checking on IndexWriterConfig.
+  }
+  
+  private static final class MyIndexingChain extends IndexingChain {
+    // Does not implement anything - used only for type checking on IndexWriterConfig.
+
+    @Override
+    DocConsumer getChain(DocumentsWriter documentsWriter) {
+      return null;
+    }
+    
+  }
+
+  private static final class MyWarmer extends IndexReaderWarmer {
+    // Does not implement anything - used only for type checking on IndexWriterConfig.
+
+    @Override
+    public void warm(IndexReader reader) throws IOException {
+    }
+    
+  }
+  
+  @Test
+  public void testDefaults() throws Exception {
+    IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
+    assertEquals(MockAnalyzer.class, conf.getAnalyzer().getClass());
+    assertNull(conf.getIndexCommit());
+    assertEquals(KeepOnlyLastCommitDeletionPolicy.class, conf.getIndexDeletionPolicy().getClass());
+    assertEquals(ConcurrentMergeScheduler.class, conf.getMergeScheduler().getClass());
+    assertEquals(OpenMode.CREATE_OR_APPEND, conf.getOpenMode());
+    assertTrue(Similarity.getDefault() == conf.getSimilarity());
+    assertEquals(IndexWriterConfig.DEFAULT_TERM_INDEX_INTERVAL, conf.getTermIndexInterval());
+    assertEquals(IndexWriterConfig.getDefaultWriteLockTimeout(), conf.getWriteLockTimeout());
+    assertEquals(IndexWriterConfig.WRITE_LOCK_TIMEOUT, IndexWriterConfig.getDefaultWriteLockTimeout());
+    assertEquals(IndexWriterConfig.DEFAULT_MAX_BUFFERED_DELETE_TERMS, conf.getMaxBufferedDeleteTerms());
+    assertEquals(IndexWriterConfig.DEFAULT_RAM_BUFFER_SIZE_MB, conf.getRAMBufferSizeMB(), 0.0);
+    assertEquals(IndexWriterConfig.DEFAULT_MAX_BUFFERED_DOCS, conf.getMaxBufferedDocs());
+    assertEquals(IndexWriterConfig.DEFAULT_READER_POOLING, conf.getReaderPooling());
+    assertTrue(DocumentsWriter.defaultIndexingChain == conf.getIndexingChain());
+    assertNull(conf.getMergedSegmentWarmer());
+    assertEquals(IndexWriterConfig.DEFAULT_MAX_THREAD_STATES, conf.getMaxThreadStates());
+    assertEquals(IndexWriterConfig.DEFAULT_READER_TERMS_INDEX_DIVISOR, conf.getReaderTermsIndexDivisor());
+    assertEquals(TieredMergePolicy.class, conf.getMergePolicy().getClass());
+    
+    // Sanity check - validate that all getters are covered.
+    Set<String> getters = new HashSet<String>();
+    getters.add("getAnalyzer");
+    getters.add("getIndexCommit");
+    getters.add("getIndexDeletionPolicy");
+    getters.add("getMergeScheduler");
+    getters.add("getOpenMode");
+    getters.add("getSimilarity");
+    getters.add("getTermIndexInterval");
+    getters.add("getWriteLockTimeout");
+    getters.add("getDefaultWriteLockTimeout");
+    getters.add("getMaxBufferedDeleteTerms");
+    getters.add("getRAMBufferSizeMB");
+    getters.add("getMaxBufferedDocs");
+    getters.add("getIndexingChain");
+    getters.add("getMergedSegmentWarmer");
+    getters.add("getMergePolicy");
+    getters.add("getMaxThreadStates");
+    getters.add("getReaderPooling");
+    getters.add("getReaderTermsIndexDivisor");
+    for (Method m : IndexWriterConfig.class.getDeclaredMethods()) {
+      if (m.getDeclaringClass() == IndexWriterConfig.class && m.getName().startsWith("get")) {
+        assertTrue("method " + m.getName() + " is not tested for defaults", getters.contains(m.getName()));
+      }
+    }
+  }
+
+  @Test
+  public void testSettersChaining() throws Exception {
+    // Ensures that every setter returns IndexWriterConfig to enable easy
+    // chaining.
+    for (Method m : IndexWriterConfig.class.getDeclaredMethods()) {
+      if (m.getDeclaringClass() == IndexWriterConfig.class
+          && m.getName().startsWith("set")
+          && !Modifier.isStatic(m.getModifiers())) {
+        assertEquals("method " + m.getName() + " does not return IndexWriterConfig", 
+            IndexWriterConfig.class, m.getReturnType());
+      }
+    }
+  }
+  
+  @Test
+  public void testConstants() throws Exception {
+    // Tests that the values of the constants do not change
+    assertEquals(1000, IndexWriterConfig.WRITE_LOCK_TIMEOUT);
+    assertEquals(128, IndexWriterConfig.DEFAULT_TERM_INDEX_INTERVAL);
+    assertEquals(-1, IndexWriterConfig.DISABLE_AUTO_FLUSH);
+    assertEquals(IndexWriterConfig.DISABLE_AUTO_FLUSH, IndexWriterConfig.DEFAULT_MAX_BUFFERED_DELETE_TERMS);
+    assertEquals(IndexWriterConfig.DISABLE_AUTO_FLUSH, IndexWriterConfig.DEFAULT_MAX_BUFFERED_DOCS);
+    assertEquals(16.0, IndexWriterConfig.DEFAULT_RAM_BUFFER_SIZE_MB, 0.0);
+    assertEquals(false, IndexWriterConfig.DEFAULT_READER_POOLING);
+    assertEquals(8, IndexWriterConfig.DEFAULT_MAX_THREAD_STATES);
+    assertEquals(IndexReader.DEFAULT_TERMS_INDEX_DIVISOR, IndexWriterConfig.DEFAULT_READER_TERMS_INDEX_DIVISOR);
+  }
+  
+  @Test
+  public void testToString() throws Exception {
+    String str = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).toString();
+    for (Field f : IndexWriterConfig.class.getDeclaredFields()) {
+      int modifiers = f.getModifiers();
+      if (Modifier.isStatic(modifiers) && Modifier.isFinal(modifiers)) {
+        // Skip static final fields, they are only constants
+        continue;
+      } else if ("indexingChain".equals(f.getName())) {
+        // indexingChain is a package-private setting and thus is not output by
+        // toString.
+        continue;
+      }
+      assertTrue(f.getName() + " not found in toString", str.indexOf(f.getName()) != -1);
+    }
+  }
+  
+  @Test
+  public void testClone() throws Exception {
+    IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
+    IndexWriterConfig clone = (IndexWriterConfig) conf.clone();
+    
+    // Clone is shallow since not all parameters are cloneable.
+    assertTrue(conf.getIndexDeletionPolicy() == clone.getIndexDeletionPolicy());
+    
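+    // Modifying the original config after cloning must not change the clone: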
+    conf.setMergeScheduler(new SerialMergeScheduler());
+    assertEquals(ConcurrentMergeScheduler.class, clone.getMergeScheduler().getClass());
+  }
+
+  @Test
+  public void testInvalidValues() throws Exception {
+    IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
+    
+    // Test IndexDeletionPolicy
+    assertEquals(KeepOnlyLastCommitDeletionPolicy.class, conf.getIndexDeletionPolicy().getClass());
+    conf.setIndexDeletionPolicy(new SnapshotDeletionPolicy(null));
+    assertEquals(SnapshotDeletionPolicy.class, conf.getIndexDeletionPolicy().getClass());
+    conf.setIndexDeletionPolicy(null);
+    assertEquals(KeepOnlyLastCommitDeletionPolicy.class, conf.getIndexDeletionPolicy().getClass());
+    
+    // Test MergeScheduler
+    assertEquals(ConcurrentMergeScheduler.class, conf.getMergeScheduler().getClass());
+    conf.setMergeScheduler(new SerialMergeScheduler());
+    assertEquals(SerialMergeScheduler.class, conf.getMergeScheduler().getClass());
+    conf.setMergeScheduler(null);
+    assertEquals(ConcurrentMergeScheduler.class, conf.getMergeScheduler().getClass());
+
+    // Test Similarity
+    assertTrue(Similarity.getDefault() == conf.getSimilarity());
+    conf.setSimilarity(new MySimilarity());
+    assertEquals(MySimilarity.class, conf.getSimilarity().getClass());
+    conf.setSimilarity(null);
+    assertTrue(Similarity.getDefault() == conf.getSimilarity());
+
+    // Test IndexingChain
+    assertTrue(DocumentsWriter.defaultIndexingChain == conf.getIndexingChain());
+    conf.setIndexingChain(new MyIndexingChain());
+    assertEquals(MyIndexingChain.class, conf.getIndexingChain().getClass());
+    conf.setIndexingChain(null);
+    assertTrue(DocumentsWriter.defaultIndexingChain == conf.getIndexingChain());
+    
+    try {
+      conf.setMaxBufferedDeleteTerms(0);
+      fail("should not have succeeded to set maxBufferedDeleteTerms to 0");
+    } catch (IllegalArgumentException e) {
+      // this is expected
+    }
+
+    try {
+      conf.setMaxBufferedDocs(1);
+      fail("should not have succeeded to set maxBufferedDocs to 1");
+    } catch (IllegalArgumentException e) {
+      // this is expected
+    }
+
+    try {
+      // Disable both MAX_BUF_DOCS and RAM_SIZE_MB
+      conf.setMaxBufferedDocs(4);
+      conf.setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
+      conf.setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH);
+      fail("should not have succeeded to disable maxBufferedDocs when ramBufferSizeMB is disabled as well");
+    } catch (IllegalArgumentException e) {
+      // this is expected
+    }
+
+    conf.setRAMBufferSizeMB(IndexWriterConfig.DEFAULT_RAM_BUFFER_SIZE_MB);
+    conf.setMaxBufferedDocs(IndexWriterConfig.DEFAULT_MAX_BUFFERED_DOCS);
+    try {
+      conf.setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
+      fail("should not have succeeded to disable ramBufferSizeMB when maxBufferedDocs is disabled as well");
+    } catch (IllegalArgumentException e) {
+      // this is expected
+    }
+
+    // Test setReaderTermsIndexDivisor
+    try {
+      conf.setReaderTermsIndexDivisor(0);
+      fail("should not have succeeded to set termsIndexDivisor to 0");
+    } catch (IllegalArgumentException e) {
+      // this is expected
+    }
+    
+    // Setting to -1 is ok
+    conf.setReaderTermsIndexDivisor(-1);
+    try {
+      conf.setReaderTermsIndexDivisor(-2);
+      fail("should not have succeeded to set termsIndexDivisor to < -1");
+    } catch (IllegalArgumentException e) {
+      // this is expected
+    }
+    
+    assertEquals(IndexWriterConfig.DEFAULT_MAX_THREAD_STATES, conf.getMaxThreadStates());
+    conf.setMaxThreadStates(5);
+    assertEquals(5, conf.getMaxThreadStates());
+    conf.setMaxThreadStates(0);
+    assertEquals(IndexWriterConfig.DEFAULT_MAX_THREAD_STATES, conf.getMaxThreadStates());
+    
+    // Test MergePolicy
+    assertEquals(TieredMergePolicy.class, conf.getMergePolicy().getClass());
+    conf.setMergePolicy(new LogDocMergePolicy());
+    assertEquals(LogDocMergePolicy.class, conf.getMergePolicy().getClass());
+    conf.setMergePolicy(null);
+    assertEquals(LogByteSizeMergePolicy.class, conf.getMergePolicy().getClass());
+  }
+
+  /**
+   * @deprecated should be removed once all the deprecated setters are removed
+   *             from IndexWriter.
+   */
+  @Test @Deprecated
+  public void testIndexWriterSetters() throws Exception {
+    // This test intentionally tests deprecated methods. The purpose is to pass
+    // whatever the user set on IW to IWC, so that if the user calls
+    // iw.getConfig().getXYZ(), he'll get the same value he passed to
+    // iw.setXYZ().
+    IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
+    Directory dir = newDirectory();
+    IndexWriter writer = new IndexWriter(dir, conf);
+
+    writer.setSimilarity(new MySimilarity());
+    assertEquals(MySimilarity.class, writer.getConfig().getSimilarity().getClass());
+
+    writer.setMaxBufferedDeleteTerms(4);
+    assertEquals(4, writer.getConfig().getMaxBufferedDeleteTerms());
+
+    writer.setMaxBufferedDocs(10);
+    assertEquals(10, writer.getConfig().getMaxBufferedDocs());
+    
+    writer.setMergeScheduler(new SerialMergeScheduler());
+    assertEquals(SerialMergeScheduler.class, writer.getConfig().getMergeScheduler().getClass());
+    
+    writer.setRAMBufferSizeMB(1.5);
+    assertEquals(1.5, writer.getConfig().getRAMBufferSizeMB(), 0.0);
+    
+    writer.setTermIndexInterval(40);
+    assertEquals(40, writer.getConfig().getTermIndexInterval());
+    
+    writer.setWriteLockTimeout(100);
+    assertEquals(100, writer.getConfig().getWriteLockTimeout());
+    
+    writer.setMergedSegmentWarmer(new MyWarmer());
+    assertEquals(MyWarmer.class, writer.getConfig().getMergedSegmentWarmer().getClass());
+    
+    writer.setMergePolicy(new LogDocMergePolicy());
+    assertEquals(LogDocMergePolicy.class, writer.getConfig().getMergePolicy().getClass());
+    writer.close();
+    dir.close();
+  }
+
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestIndexWriterDelete.java b/lucene/backwards/src/test/org/apache/lucene/index/TestIndexWriterDelete.java
new file mode 100644
index 0000000..84af3a5
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestIndexWriterDelete.java
@@ -0,0 +1,1088 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.io.Reader;
+import java.util.Arrays;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Random;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.MockTokenizer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.Field.Index;
+import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.document.Field.TermVector;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.MockDirectoryWrapper;
+import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util._TestUtil;
+
+public class TestIndexWriterDelete extends LuceneTestCase {
+  
+  // test the simple case
+  public void testSimpleCase() throws IOException {
+    String[] keywords = { "1", "2" };
+    String[] unindexed = { "Netherlands", "Italy" };
+    String[] unstored = { "Amsterdam has lots of bridges",
+        "Venice has lots of canals" };
+    String[] text = { "Amsterdam", "Venice" };
+
+    Directory dir = newDirectory();
+    IndexWriter modifier = new IndexWriter(dir, newIndexWriterConfig(
+        TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).setMaxBufferedDeleteTerms(1));
+
+    for (int i = 0; i < keywords.length; i++) {
+      Document doc = new Document();
+      doc.add(newField("id", keywords[i], Field.Store.YES,
+                        Field.Index.NOT_ANALYZED));
+      doc.add(newField("country", unindexed[i], Field.Store.YES,
+                        Field.Index.NO));
+      doc.add(newField("contents", unstored[i], Field.Store.NO,
+                        Field.Index.ANALYZED));
+      doc.add(newField("city", text[i], Field.Store.YES,
+                        Field.Index.ANALYZED));
+      modifier.addDocument(doc);
+    }
+    modifier.optimize();
+    modifier.commit();
+
+    Term term = new Term("city", "Amsterdam");
+    int hitCount = getHitCount(dir, term);
+    assertEquals(1, hitCount);
+    modifier.deleteDocuments(term);
+    modifier.commit();
+    hitCount = getHitCount(dir, term);
+    assertEquals(0, hitCount);
+
+    modifier.close();
+    dir.close();
+  }
+
+  // test when delete terms only apply to disk segments
+  public void testNonRAMDelete() throws IOException {
+
+    Directory dir = newDirectory();
+    IndexWriter modifier = new IndexWriter(dir, newIndexWriterConfig(
+        TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).setMaxBufferedDocs(2)
+        .setMaxBufferedDeleteTerms(2));
+    modifier.setInfoStream(VERBOSE ? System.out : null);
+    int id = 0;
+    int value = 100;
+
+    for (int i = 0; i < 7; i++) {
+      addDoc(modifier, ++id, value);
+    }
+    modifier.commit();
+
+    assertEquals(0, modifier.getNumBufferedDocuments());
+    assertTrue(0 < modifier.getSegmentCount());
+
+    modifier.commit();
+
+    IndexReader reader = IndexReader.open(dir, true);
+    assertEquals(7, reader.numDocs());
+    reader.close();
+
+    modifier.deleteDocuments(new Term("value", String.valueOf(value)));
+
+    modifier.commit();
+
+    reader = IndexReader.open(dir, true);
+    assertEquals(0, reader.numDocs());
+    reader.close();
+    modifier.close();
+    dir.close();
+  }
+
+  public void testMaxBufferedDeletes() throws IOException {
+    Directory dir = newDirectory();
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
+        TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).setMaxBufferedDeleteTerms(1));
+
+    writer.setInfoStream(VERBOSE ? System.out : null);
+    writer.addDocument(new Document());
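+    // With maxBufferedDeleteTerms=1, each deleteDocuments call should flush the
+    // buffered deletes, so the flush-deletes count is expected to reach 3: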
+    writer.deleteDocuments(new Term("foobar", "1"));
+    writer.deleteDocuments(new Term("foobar", "1"));
+    writer.deleteDocuments(new Term("foobar", "1"));
+    assertEquals(3, writer.getFlushDeletesCount());
+    writer.close();
+    dir.close();
+  }
+
+  // test when delete terms only apply to ram segments
+  public void testRAMDeletes() throws IOException {
+    for(int t=0;t<2;t++) {
+      if (VERBOSE) {
+        System.out.println("TEST: t=" + t);
+      }
+      Directory dir = newDirectory();
+      IndexWriter modifier = new IndexWriter(dir, newIndexWriterConfig(
+          TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).setMaxBufferedDocs(4)
+          .setMaxBufferedDeleteTerms(4));
+      modifier.setInfoStream(VERBOSE ? System.out : null);
+      int id = 0;
+      int value = 100;
+
+      addDoc(modifier, ++id, value);
+      if (0 == t)
+        modifier.deleteDocuments(new Term("value", String.valueOf(value)));
+      else
+        modifier.deleteDocuments(new TermQuery(new Term("value", String.valueOf(value))));
+      addDoc(modifier, ++id, value);
+      if (0 == t) {
+        modifier.deleteDocuments(new Term("value", String.valueOf(value)));
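+        // Two delete-by-term calls are buffered, but both target the same term,
+        // so the unique buffered-terms map has size 1: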
+        assertEquals(2, modifier.getNumBufferedDeleteTerms());
+        assertEquals(1, modifier.getBufferedDeleteTermsSize());
+      }
+      else
+        modifier.deleteDocuments(new TermQuery(new Term("value", String.valueOf(value))));
+
+      addDoc(modifier, ++id, value);
+      assertEquals(0, modifier.getSegmentCount());
+      modifier.commit();
+
+      IndexReader reader = IndexReader.open(dir, true);
+      assertEquals(1, reader.numDocs());
+
+      int hitCount = getHitCount(dir, new Term("id", String.valueOf(id)));
+      assertEquals(1, hitCount);
+      reader.close();
+      modifier.close();
+      dir.close();
+    }
+  }
+
+  // test when delete terms apply to both disk and ram segments
+  public void testBothDeletes() throws IOException {
+    Directory dir = newDirectory();
+    IndexWriter modifier = new IndexWriter(dir, newIndexWriterConfig(
+        TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).setMaxBufferedDocs(100)
+        .setMaxBufferedDeleteTerms(100));
+
+    int id = 0;
+    int value = 100;
+
+    for (int i = 0; i < 5; i++) {
+      addDoc(modifier, ++id, value);
+    }
+
+    value = 200;
+    for (int i = 0; i < 5; i++) {
+      addDoc(modifier, ++id, value);
+    }
+    modifier.commit();
+
+    for (int i = 0; i < 5; i++) {
+      addDoc(modifier, ++id, value);
+    }
+    modifier.deleteDocuments(new Term("value", String.valueOf(value)));
+
+    modifier.commit();
+
+    IndexReader reader = IndexReader.open(dir, true);
+    assertEquals(5, reader.numDocs());
+    modifier.close();
+    reader.close();
+    dir.close();
+  }
+
+  // test that batched delete terms are flushed together
+  public void testBatchDeletes() throws IOException {
+    Directory dir = newDirectory();
+    IndexWriter modifier = new IndexWriter(dir, newIndexWriterConfig(
+        TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).setMaxBufferedDocs(2)
+        .setMaxBufferedDeleteTerms(2));
+
+    int id = 0;
+    int value = 100;
+
+    for (int i = 0; i < 7; i++) {
+      addDoc(modifier, ++id, value);
+    }
+    modifier.commit();
+
+    IndexReader reader = IndexReader.open(dir, true);
+    assertEquals(7, reader.numDocs());
+    reader.close();
+      
+    id = 0;
+    modifier.deleteDocuments(new Term("id", String.valueOf(++id)));
+    modifier.deleteDocuments(new Term("id", String.valueOf(++id)));
+
+    modifier.commit();
+
+    reader = IndexReader.open(dir, true);
+    assertEquals(5, reader.numDocs());
+    reader.close();
+
+    Term[] terms = new Term[3];
+    for (int i = 0; i < terms.length; i++) {
+      terms[i] = new Term("id", String.valueOf(++id));
+    }
+    modifier.deleteDocuments(terms);
+    modifier.commit();
+    reader = IndexReader.open(dir, true);
+    assertEquals(2, reader.numDocs());
+    reader.close();
+
+    modifier.close();
+    dir.close();
+  }
+
+  // test deleteAll()
+  public void testDeleteAll() throws IOException {
+    Directory dir = newDirectory();
+    IndexWriter modifier = new IndexWriter(dir, newIndexWriterConfig(
+        TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).setMaxBufferedDocs(2)
+        .setMaxBufferedDeleteTerms(2));
+
+    int id = 0;
+    int value = 100;
+
+    for (int i = 0; i < 7; i++) {
+      addDoc(modifier, ++id, value);
+    }
+    modifier.commit();
+
+    IndexReader reader = IndexReader.open(dir, true);
+    assertEquals(7, reader.numDocs());
+    reader.close();
+
+    // Add 1 doc (so we will have something buffered)
+    addDoc(modifier, 99, value);
+
+    // Delete all
+    modifier.deleteAll();
+
+    // Delete all shouldn't be on disk yet
+    reader = IndexReader.open(dir, true);
+    assertEquals(7, reader.numDocs());
+    reader.close();
+
+    // Add a doc and update a doc (after the deleteAll, before the commit)
+    addDoc(modifier, 101, value);
+    updateDoc(modifier, 102, value);
+
+    // commit the delete all
+    modifier.commit();
+
+    // Validate that only the docs added after deleteAll remain:
+    reader = IndexReader.open(dir, true);
+    assertEquals(2, reader.numDocs());
+    reader.close();
+
+    modifier.close();
+    dir.close();
+  }
+
+  // test rollback of deleteAll()
+  public void testDeleteAllRollback() throws IOException {
+    Directory dir = newDirectory();
+    IndexWriter modifier = new IndexWriter(dir, newIndexWriterConfig(
+        TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).setMaxBufferedDocs(2)
+        .setMaxBufferedDeleteTerms(2));
+    
+    int id = 0;
+    int value = 100;
+    
+    for (int i = 0; i < 7; i++) {
+      addDoc(modifier, ++id, value);
+    }
+    modifier.commit();
+    
+    addDoc(modifier, ++id, value);
+
+    IndexReader reader = IndexReader.open(dir, true);
+    assertEquals(7, reader.numDocs());
+    reader.close();
+    
+    // Delete all
+    modifier.deleteAll(); 
+
+    // Roll it back
+    modifier.rollback();
+    modifier.close();
+    
+    // Validate that the docs are still there
+    reader = IndexReader.open(dir, true);
+    assertEquals(7, reader.numDocs());
+    reader.close();
+    
+    dir.close();
+  }
+
+
+  // test deleteAll() w/ near real-time reader
+  public void testDeleteAllNRT() throws IOException {
+    Directory dir = newDirectory();
+    IndexWriter modifier = new IndexWriter(dir, newIndexWriterConfig(
+        TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).setMaxBufferedDocs(2)
+        .setMaxBufferedDeleteTerms(2));
+    
+    int id = 0;
+    int value = 100;
+    
+    for (int i = 0; i < 7; i++) {
+      addDoc(modifier, ++id, value);
+    }
+    modifier.commit();
+
+    IndexReader reader = modifier.getReader();
+    assertEquals(7, reader.numDocs());
+    reader.close();
+
+    addDoc(modifier, ++id, value);
+    addDoc(modifier, ++id, value);
+    
+    // Delete all
+    modifier.deleteAll(); 
+
+    reader = modifier.getReader();
+    assertEquals(0, reader.numDocs());
+    reader.close();
+    
+
+    // Roll it back
+    modifier.rollback();
+    modifier.close();
+    
+    // Validate that the docs are still there
+    reader = IndexReader.open(dir, true);
+    assertEquals(7, reader.numDocs());
+    reader.close();
+    
+    dir.close();
+  }
+
+
+  private void updateDoc(IndexWriter modifier, int id, int value)
+      throws IOException {
+    Document doc = new Document();
+    doc.add(newField("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
+    doc.add(newField("id", String.valueOf(id), Field.Store.YES,
+        Field.Index.NOT_ANALYZED));
+    doc.add(newField("value", String.valueOf(value), Field.Store.NO,
+        Field.Index.NOT_ANALYZED));
+    modifier.updateDocument(new Term("id", String.valueOf(id)), doc);
+  }
+
+
+  private void addDoc(IndexWriter modifier, int id, int value)
+      throws IOException {
+    Document doc = new Document();
+    doc.add(newField("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
+    doc.add(newField("id", String.valueOf(id), Field.Store.YES,
+        Field.Index.NOT_ANALYZED));
+    doc.add(newField("value", String.valueOf(value), Field.Store.NO,
+        Field.Index.NOT_ANALYZED));
+    modifier.addDocument(doc);
+  }
+
+  private int getHitCount(Directory dir, Term term) throws IOException {
+    IndexSearcher searcher = new IndexSearcher(dir, true);
+    int hitCount = searcher.search(new TermQuery(term), null, 1000).totalHits;
+    searcher.close();
+    return hitCount;
+  }
+
+  public void testDeletesOnDiskFull() throws IOException {
+    doTestOperationsOnDiskFull(false);
+  }
+
+  public void testUpdatesOnDiskFull() throws IOException {
+    doTestOperationsOnDiskFull(true);
+  }
+
+  /**
+   * Make sure if modifier tries to commit but hits disk full that modifier
+   * remains consistent and usable. Similar to TestIndexReader.testDiskFull().
+   */
+  private void doTestOperationsOnDiskFull(boolean updates) throws IOException {
+
+    Term searchTerm = new Term("content", "aaa");
+    int START_COUNT = 157;
+    int END_COUNT = 144;
+
+    // First build up a starting index:
+    MockDirectoryWrapper startDir = newDirectory();
+    // TODO: find the resource leak that only occurs sometimes here.
+    startDir.setNoDeleteOpenFile(false);
+    IndexWriter writer = new IndexWriter(startDir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)));
+    for (int i = 0; i < 157; i++) {
+      Document d = new Document();
+      d.add(newField("id", Integer.toString(i), Field.Store.YES,
+                      Field.Index.NOT_ANALYZED));
+      d.add(newField("content", "aaa " + i, Field.Store.NO,
+                      Field.Index.ANALYZED));
+      writer.addDocument(d);
+    }
+    writer.close();
+
+    long diskUsage = startDir.sizeInBytes();
+    long diskFree = diskUsage + 10;
+
+    IOException err = null;
+
+    boolean done = false;
+
+    // Iterate w/ ever increasing free disk space:
+    while (!done) {
+      if (VERBOSE) {
+        System.out.println("TEST: cycle");
+      }
+      MockDirectoryWrapper dir = new MockDirectoryWrapper(random, new RAMDirectory(startDir));
+      dir.setPreventDoubleWrite(false);
+      IndexWriter modifier = new IndexWriter(dir,
+                                             newIndexWriterConfig(
+                                                                  TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false))
+                                             .setMaxBufferedDocs(1000)
+                                             .setMaxBufferedDeleteTerms(1000)
+                                             .setMergeScheduler(new ConcurrentMergeScheduler()));
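+      // Merge threads may hit the simulated disk-full/IOExceptions below; keep the
+      // merge scheduler from rethrowing those so they don't fail the test: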
+      ((ConcurrentMergeScheduler) modifier.getConfig().getMergeScheduler()).setSuppressExceptions();
+      modifier.setInfoStream(VERBOSE ? System.out : null);
+
+      // For each disk size, first try to commit against
+      // dir that will hit random IOExceptions & disk
+      // full; after, give it infinite disk space & turn
+      // off random IOExceptions & retry w/ same writer:
+      boolean success = false;
+
+      for (int x = 0; x < 2; x++) {
+        if (VERBOSE) {
+          System.out.println("TEST: x=" + x);
+        }
+
+        double rate = 0.1;
+        double diskRatio = ((double)diskFree) / diskUsage;
+        long thisDiskFree;
+        String testName;
+
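+        // The more free space there is relative to the index size, the lower the
+        // random IOException rate we use, so the operations eventually succeed: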
+        if (0 == x) {
+          thisDiskFree = diskFree;
+          if (diskRatio >= 2.0) {
+            rate /= 2;
+          }
+          if (diskRatio >= 4.0) {
+            rate /= 2;
+          }
+          if (diskRatio >= 6.0) {
+            rate = 0.0;
+          }
+          if (VERBOSE) {
+            System.out.println("\ncycle: " + diskFree + " bytes");
+          }
+          testName = "disk full during writer.close() @ " + thisDiskFree
+            + " bytes";
+        } else {
+          thisDiskFree = 0;
+          rate = 0.0;
+          if (VERBOSE) {
+            System.out.println("\ncycle: same writer: unlimited disk space");
+          }
+          testName = "writer re-use after disk full";
+        }
+
+        dir.setMaxSizeInBytes(thisDiskFree);
+        dir.setRandomIOExceptionRate(rate);
+
+        try {
+          if (0 == x) {
+            int docId = 12;
+            for (int i = 0; i < 13; i++) {
+              if (updates) {
+                Document d = new Document();
+                d.add(newField("id", Integer.toString(i), Field.Store.YES,
+                                Field.Index.NOT_ANALYZED));
+                d.add(newField("content", "bbb " + i, Field.Store.NO,
+                                Field.Index.ANALYZED));
+                modifier.updateDocument(new Term("id", Integer.toString(docId)), d);
+              } else { // deletes
+                modifier.deleteDocuments(new Term("id", Integer.toString(docId)));
+                // modifier.setNorm(docId, "contents", (float)2.0);
+              }
+              docId += 12;
+            }
+          }
+          modifier.close();
+          success = true;
+          if (0 == x) {
+            done = true;
+          }
+        }
+        catch (IOException e) {
+          if (VERBOSE) {
+            System.out.println("  hit IOException: " + e);
+            e.printStackTrace(System.out);
+          }
+          err = e;
+          if (1 == x) {
+            e.printStackTrace();
+            fail(testName + " hit IOException after disk space was freed up");
+          }
+        }
+
+        if (!success) {
+          // Must force the writer closed (via rollback) else it can still hold
+          // open files, which causes an exception in MockRAMDir.close:
+          modifier.rollback();
+        }
+
+        // If the close() succeeded, make sure there are
+        // no unreferenced files.
+        if (success) {
+          _TestUtil.checkIndex(dir);
+          TestIndexWriter.assertNoUnreferencedFiles(dir, "after writer.close");
+        }
+
+        // Finally, verify index is not corrupt, and, if
+        // we succeeded, we see all docs changed, and if
+        // we failed, we see either all docs or no docs
+        // changed (transactional semantics):
+        IndexReader newReader = null;
+        try {
+          newReader = IndexReader.open(dir, true);
+        }
+        catch (IOException e) {
+          e.printStackTrace();
+          fail(testName
+               + ":exception when creating IndexReader after disk full during close: "
+               + e);
+        }
+
+        IndexSearcher searcher = newSearcher(newReader);
+        ScoreDoc[] hits = null;
+        try {
+          hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
+        }
+        catch (IOException e) {
+          e.printStackTrace();
+          fail(testName + ": exception when searching: " + e);
+        }
+        int result2 = hits.length;
+        if (success) {
+          if (x == 0 && result2 != END_COUNT) {
+            fail(testName
+                 + ": method did not throw exception but hits.length for search on term 'aaa' is "
+                 + result2 + " instead of expected " + END_COUNT);
+          } else if (x == 1 && result2 != START_COUNT && result2 != END_COUNT) {
+            // It's possible that the first exception was
+            // "recoverable" wrt pending deletes, in which
+            // case the pending deletes are retained and
+            // then re-flushing (with plenty of disk
+            // space) will succeed in flushing the
+            // deletes:
+            fail(testName
+                 + ": method did not throw exception but hits.length for search on term 'aaa' is "
+                 + result2 + " instead of expected " + START_COUNT + " or " + END_COUNT);
+          }
+        } else {
+          // Even though we hit an exception, the deletes/updates
+          // may still all have been applied:
+          if (result2 != START_COUNT && result2 != END_COUNT) {
+            err.printStackTrace();
+            fail(testName
+                 + ": method did throw exception but hits.length for search on term 'aaa' is "
+                 + result2 + " instead of expected " + START_COUNT + " or " + END_COUNT);
+          }
+        }
+
+        searcher.close();
+        newReader.close();
+      }
+
+      modifier.close();
+      dir.close();
+
+      // Try again with 10 more bytes of free space:
+      diskFree += 10;
+    }
+    startDir.close();
+  }
+
+  // This test tests that buffered deletes are cleared when
+  // an Exception is hit during flush.
+  public void testErrorAfterApplyDeletes() throws IOException {
+    
+    MockDirectoryWrapper.Failure failure = new MockDirectoryWrapper.Failure() {
+        boolean sawMaybe = false;
+        boolean failed = false;
+        Thread thread;
+        @Override
+        public MockDirectoryWrapper.Failure reset() {
+          thread = Thread.currentThread();
+          sawMaybe = false;
+          failed = false;
+          return this;
+        }
+        @Override
+        public void eval(MockDirectoryWrapper dir)  throws IOException {
+          if (Thread.currentThread() != thread) {
+            // don't fail during merging
+            return;
+          }
+          if (sawMaybe && !failed) {
+            boolean seen = false;
+            StackTraceElement[] trace = new Exception().getStackTrace();
+            for (int i = 0; i < trace.length; i++) {
+              if ("applyDeletes".equals(trace[i].getMethodName())) {
+                seen = true;
+                break;
+              }
+            }
+            if (!seen) {
+              // Only fail once we are no longer in applyDeletes
+              failed = true;
+              if (VERBOSE) {
+                System.out.println("TEST: mock failure: now fail");
+                new Throwable().printStackTrace(System.out);
+              }
+              throw new IOException("fail after applyDeletes");
+            }
+          }
+          if (!failed) {
+            StackTraceElement[] trace = new Exception().getStackTrace();
+            for (int i = 0; i < trace.length; i++) {
+              if ("applyDeletes".equals(trace[i].getMethodName())) {
+                if (VERBOSE) {
+                  System.out.println("TEST: mock failure: saw applyDeletes");
+                  new Throwable().printStackTrace(System.out);
+                }
+                sawMaybe = true;
+                break;
+              }
+            }
+          }
+        }
+      };
+
+    // create a couple of files
+
+    String[] keywords = { "1", "2" };
+    String[] unindexed = { "Netherlands", "Italy" };
+    String[] unstored = { "Amsterdam has lots of bridges",
+        "Venice has lots of canals" };
+    String[] text = { "Amsterdam", "Venice" };
+
+    MockDirectoryWrapper dir = newDirectory();
+    IndexWriter modifier = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT,
+        new MockAnalyzer(random, MockTokenizer.WHITESPACE, false))
+        .setMaxBufferedDeleteTerms(2)
+        .setReaderPooling(false)
+        .setMergePolicy(newLogMergePolicy()));
+    modifier.setInfoStream(VERBOSE ? System.out : null);
+
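+    // use compound files so the mock failure can also hit while the .cfs file is written (see below)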
+    LogMergePolicy lmp = (LogMergePolicy) modifier.getConfig().getMergePolicy();
+    lmp.setUseCompoundFile(true);
+
+    dir.failOn(failure.reset());
+
+    for (int i = 0; i < keywords.length; i++) {
+      Document doc = new Document();
+      doc.add(newField("id", keywords[i], Field.Store.YES,
+                        Field.Index.NOT_ANALYZED));
+      doc.add(newField("country", unindexed[i], Field.Store.YES,
+                        Field.Index.NO));
+      doc.add(newField("contents", unstored[i], Field.Store.NO,
+                        Field.Index.ANALYZED));
+      doc.add(newField("city", text[i], Field.Store.YES,
+                        Field.Index.ANALYZED));
+      modifier.addDocument(doc);
+    }
+    // flush (and commit if autoCommit)
+
+    if (VERBOSE) {
+      System.out.println("TEST: now optimize");
+    }
+
+    modifier.optimize();
+    if (VERBOSE) {
+      System.out.println("TEST: now commit");
+    }
+    modifier.commit();
+
+    // one of the two docs matches the search term
+
+    Term term = new Term("city", "Amsterdam");
+    int hitCount = getHitCount(dir, term);
+    assertEquals(1, hitCount);
+
+    // the writer is still open from above
+
+    // delete the doc
+    // max buffered delete terms is two, so this delete stays buffered
+
+    if (VERBOSE) {
+      System.out.println("TEST: delete term=" + term);
+    }
+
+    modifier.deleteDocuments(term);
+
+    // add a doc (needed for the non-autoCommit case; see below)
+    // the doc remains buffered
+
+    if (VERBOSE) {
+      System.out.println("TEST: add empty doc");
+    }
+    Document doc = new Document();
+    modifier.addDocument(doc);
+
+    // commit the changes, the buffered deletes, and the new doc
+
+    // The failure object will fail on the first write after the del
+    // file gets created when processing the buffered delete
+
+    // in the autoCommit case, this will be when writing the new
+    // segments files, so we really don't need the new doc, but it's
+    // harmless
+
+    // in the non-autoCommit case, a new segments file won't be
+    // created, but creation of the cfs file happens next, so we need
+    // the doc (to test that we don't lose deletes if we fail while
+    // creating the cfs file)
+    boolean failed = false;
+    try {
+      if (VERBOSE) {
+        System.out.println("TEST: now commit for failure");
+      }
+      modifier.commit();
+    } catch (IOException ioe) {
+      // expected
+      failed = true;
+    }
+
+    assertTrue(failed);
+
+    // The commit above failed, so we need to retry it (which will
+    // succeed, because the failure is a one-shot)
+
+    modifier.commit();
+
+    hitCount = getHitCount(dir, term);
+
+    // Make sure the delete was successfully flushed:
+    assertEquals(0, hitCount);
+
+    modifier.close();
+    dir.close();
+  }
+
+  // This test verifies that the files created by the docs writer
+  // before a segment is written are cleaned up if there's an I/O error
+
+  public void testErrorInDocsWriterAdd() throws IOException {
+    
+    MockDirectoryWrapper.Failure failure = new MockDirectoryWrapper.Failure() {
+        boolean failed = false;
+        @Override
+        public MockDirectoryWrapper.Failure reset() {
+          failed = false;
+          return this;
+        }
+        @Override
+        public void eval(MockDirectoryWrapper dir)  throws IOException {
+          if (!failed) {
+            failed = true;
+            throw new IOException("fail in add doc");
+          }
+        }
+      };
+
+    // create a couple of files
+
+    String[] keywords = { "1", "2" };
+    String[] unindexed = { "Netherlands", "Italy" };
+    String[] unstored = { "Amsterdam has lots of bridges",
+        "Venice has lots of canals" };
+    String[] text = { "Amsterdam", "Venice" };
+
+    MockDirectoryWrapper dir = newDirectory();
+    IndexWriter modifier = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)));
+    modifier.commit();
+    dir.failOn(failure.reset());
+
+    for (int i = 0; i < keywords.length; i++) {
+      Document doc = new Document();
+      doc.add(newField("id", keywords[i], Field.Store.YES,
+                        Field.Index.NOT_ANALYZED));
+      doc.add(newField("country", unindexed[i], Field.Store.YES,
+                        Field.Index.NO));
+      doc.add(newField("contents", unstored[i], Field.Store.NO,
+                        Field.Index.ANALYZED));
+      doc.add(newField("city", text[i], Field.Store.YES,
+                        Field.Index.ANALYZED));
+      try {
+        modifier.addDocument(doc);
+      } catch (IOException io) {
+        if (VERBOSE) {
+          System.out.println("TEST: got expected exc:");
+          io.printStackTrace(System.out);
+        }
+        break;
+      }
+    }
+
+    String[] startFiles = dir.listAll();
+    SegmentInfos infos = new SegmentInfos();
+    infos.read(dir);
+    new IndexFileDeleter(dir, new KeepOnlyLastCommitDeletionPolicy(), infos, null);
+    String[] endFiles = dir.listAll();
+    modifier.close();
+    dir.close();
+
+    if (!Arrays.equals(startFiles, endFiles)) {
+      fail("docswriter abort() failed to delete unreferenced files:\n  before delete:\n    "
+           + arrayToString(startFiles) + "\n  after delete:\n    "
+           + arrayToString(endFiles));
+    }
+
+  }
+
+  private String arrayToString(String[] l) {
+    StringBuilder sb = new StringBuilder();
+    for (int i = 0; i < l.length; i++) {
+      if (i > 0) {
+        sb.append("\n    ");
+      }
+      sb.append(l[i]);
+    }
+    return sb.toString();
+  }
+  
+  public void testDeleteAllSlowly() throws Exception {
+    final Directory dir = newDirectory();
+    RandomIndexWriter w = new RandomIndexWriter(random, dir);
+    final int NUM_DOCS = atLeast(1000);
+    final List<Integer> ids = new ArrayList<Integer>(NUM_DOCS);
+    for(int id=0;id<NUM_DOCS;id++) {
+      ids.add(id);
+    }
+    Collections.shuffle(ids, random);
+    for(int id : ids) {
+      Document doc = new Document();
+      doc.add(newField("id", ""+id, Field.Index.NOT_ANALYZED));
+      w.addDocument(doc);
+    }
+    Collections.shuffle(ids, random);
+    int upto = 0;
+    while(upto < ids.size()) {
+      final int left = ids.size() - upto;
+      final int inc = Math.min(left, _TestUtil.nextInt(random, 1, 20));
+      final int limit = upto + inc;
+      while(upto < limit) {
+        w.deleteDocuments(new Term("id", ""+ids.get(upto++)));
+      }
+      final IndexReader r = w.getReader();
+      assertEquals(NUM_DOCS - upto, r.numDocs());
+      r.close();
+    }
+
+    w.close();
+    dir.close();
+  }
+  
+  public void testIndexingThenDeleting() throws Exception {
+    final Random r = random;
+    Directory dir = newDirectory();
+    // note this test explicitly disables payloads
+    final Analyzer analyzer = new Analyzer() {
+      @Override
+      public TokenStream tokenStream(String fieldName, Reader reader) {
+        return new MockTokenizer(reader, MockTokenizer.WHITESPACE, true);
+      }
+    };
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, analyzer).setRAMBufferSizeMB(1.0).setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH).setMaxBufferedDeleteTerms(IndexWriterConfig.DISABLE_AUTO_FLUSH));
+    w.setInfoStream(VERBOSE ? System.out : null);
+    Document doc = new Document();
+    doc.add(newField("field", "go 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20", Field.Store.NO, Field.Index.ANALYZED));
+    int num = atLeast(3);
+    for (int iter = 0; iter < num; iter++) {
+      int count = 0;
+
+      final boolean doIndexing = r.nextBoolean();
+      if (VERBOSE) {
+        System.out.println("TEST: iter doIndexing=" + doIndexing);
+      }
+      if (doIndexing) {
+        // Add docs until a flush is triggered
+        final int startFlushCount = w.getFlushCount();
+        while(w.getFlushCount() == startFlushCount) {
+          w.addDocument(doc);
+          count++;
+        }
+      } else {
+        // Delete docs until a flush is triggered
+        final int startFlushCount = w.getFlushCount();
+        while(w.getFlushCount() == startFlushCount) {
+          w.deleteDocuments(new Term("foo", ""+count));
+          count++;
+        }
+      }
+      assertTrue("flush happened too quickly during " + (doIndexing ? "indexing" : "deleting") + " count=" + count, count > 3000);
+    }
+    w.close();
+    dir.close();
+  }
+
+  // LUCENE-3340: make sure deletes that we don't apply
+  // during flush (ie are just pushed into the stream) are
+  // in fact later flushed due to their RAM usage:
+  public void testFlushPushedDeletesByRAM() throws Exception {
+    Directory dir = newDirectory();
+    // Cannot use RandomIndexWriter because we don't want to
+    // ever call commit() for this test:
+    // note: tiny rambuffer used, as with a 1MB buffer the test is too slow (flush @ 128,999)
+    IndexWriter w = new IndexWriter(dir,
+                                    newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
+                                    .setRAMBufferSizeMB(0.2f).setMaxBufferedDocs(1000).setMergePolicy(NoMergePolicy.NO_COMPOUND_FILES).setReaderPooling(false));
+    w.setInfoStream(VERBOSE ? System.out : null);
+    int count = 0;
+    while(true) {
+      Document doc = new Document();
+      doc.add(new Field("id", count+"", Field.Store.NO, Field.Index.NOT_ANALYZED));
+      final Term delTerm;
+      if (count == 1010) {
+        // This is the only delete that applies
+        delTerm = new Term("id", ""+0);
+      } else {
+        // These get buffered, taking up RAM, but delete
+        // nothing when applied:
+        delTerm = new Term("id", "x" + count);
+      }
+      w.updateDocument(delTerm, doc);
+      // Eventually segment 0 should get a deletes (.del) file:
+      if (dir.fileExists("_0_1.del")) {
+        if (VERBOSE) {
+          System.out.println("TEST: deletes created @ count=" + count);
+        }
+        break;
+      }
+      count++;
+
+      // Today we applyDeletes @ count=21553; even if we make
+      // sizable improvements to RAM efficiency of buffered
+      // del term we're unlikely to go over 100K:
+      if (count > 100000) {
+        fail("deletes were not applied");
+      }
+    }
+    w.close();
+    dir.close();
+  }
+
+  // LUCENE-3340: make sure deletes that we don't apply
+  // during flush (ie are just pushed into the stream) are
+  // in fact later flushed once the buffered delete term count is reached:
+  public void testFlushPushedDeletesByCount() throws Exception {
+    Directory dir = newDirectory();
+    // Cannot use RandomIndexWriter because we don't want to
+    // ever call commit() for this test:
+    final int flushAtDelCount = atLeast(1020);
+    IndexWriter w = new IndexWriter(dir,
+                                    newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+                                    setMaxBufferedDeleteTerms(flushAtDelCount).setMaxBufferedDocs(1000).setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH).setMergePolicy(NoMergePolicy.NO_COMPOUND_FILES).setReaderPooling(false));
+    w.setInfoStream(VERBOSE ? System.out : null);
+    if (VERBOSE) {
+      System.out.println("TEST: flush @ " + flushAtDelCount + " buffered delete terms");
+    }
+    int count = 0;
+    while(true) {
+      Document doc = new Document();
+      doc.add(new Field("id", count+"", Field.Store.NO, Field.Index.NOT_ANALYZED));
+      final Term delTerm;
+      if (count == 1010) {
+        // This is the only delete that applies
+        delTerm = new Term("id", ""+0);
+      } else {
+        // These get buffered, taking up RAM, but delete
+        // nothing when applied:
+        delTerm = new Term("id", "x" + count);
+      }
+      w.updateDocument(delTerm, doc);
+      // Eventually segment 0 should get a deletes (.del) file:
+      if (dir.fileExists("_0_1.del")) {
+        break;
+      }
+      count++;
+      if (count > flushAtDelCount) {
+        fail("deletes were not applied at count=" + flushAtDelCount);
+      }
+    }
+    w.close();
+    dir.close();
+  }
+
+  // Make sure buffered (pushed) deletes don't use up so
+  // much RAM that it forces long tail of tiny segments:
+  public void testApplyDeletesOnFlush() throws Exception {
+    Directory dir = newDirectory();
+    // Cannot use RandomIndexWriter because we don't want to
+    // ever call commit() for this test:
+    final AtomicInteger docsInSegment = new AtomicInteger();
+    final AtomicBoolean closing = new AtomicBoolean();
+    final AtomicBoolean sawAfterFlush = new AtomicBoolean();
+    IndexWriter w = new IndexWriter(dir,
+                                    newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+                                    setRAMBufferSizeMB(0.5).setMaxBufferedDocs(-1).setMergePolicy(NoMergePolicy.NO_COMPOUND_FILES).setReaderPooling(false)) {
+        @Override
+        public void doAfterFlush() {
+          assertTrue("only " + docsInSegment.get() + " in segment", closing.get() || docsInSegment.get() >= 7);
+          docsInSegment.set(0);
+          sawAfterFlush.set(true);
+        }
+      };
+    w.setInfoStream(VERBOSE ? System.out : null);
+    int id = 0;
+    while(true) {
+      StringBuilder sb = new StringBuilder();
+      for(int termIDX=0;termIDX<100;termIDX++) {
+        sb.append(' ').append(_TestUtil.randomRealisticUnicodeString(random));
+      }
+      if (id == 500) {
+        w.deleteDocuments(new Term("id", "0"));
+      }
+      Document doc = new Document();
+      doc.add(newField("id", ""+id, Field.Index.NOT_ANALYZED));
+      doc.add(newField("body", sb.toString(), Field.Index.ANALYZED));
+      w.updateDocument(new Term("id", ""+id), doc);
+      docsInSegment.incrementAndGet();
+      if (dir.fileExists("_0_1.del")) {
+        if (VERBOSE) {
+          System.out.println("TEST: deletes created @ id=" + id);
+        }
+        break;
+      }
+      id++;
+    }
+    closing.set(true);
+    assertTrue(sawAfterFlush.get());
+    w.close();
+    dir.close();
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java b/lucene/backwards/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java
new file mode 100644
index 0000000..66a9011
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java
@@ -0,0 +1,1296 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.io.Reader;
+import java.io.StringReader;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Random;
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util._TestUtil;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.store.IndexOutput;
+import org.apache.lucene.store.MockDirectoryWrapper;
+import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.MockTokenizer;
+import org.apache.lucene.analysis.TokenFilter;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.PhraseQuery;
+
+public class TestIndexWriterExceptions extends LuceneTestCase {
+
+  private class IndexerThread extends Thread {
+
+    IndexWriter writer;
+
+    final Random r = new Random(random.nextLong());
+    volatile Throwable failure;
+
+    public IndexerThread(int i, IndexWriter writer) {
+      setName("Indexer " + i);
+      this.writer = writer;
+    }
+
+    @Override
+    public void run() {
+
+      final Document doc = new Document();
+
+      doc.add(newField(r, "content1", "aaa bbb ccc ddd", Field.Store.YES, Field.Index.ANALYZED));
+      doc.add(newField(r, "content6", "aaa bbb ccc ddd", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+      doc.add(newField(r, "content2", "aaa bbb ccc ddd", Field.Store.YES, Field.Index.NOT_ANALYZED));
+      doc.add(newField(r, "content3", "aaa bbb ccc ddd", Field.Store.YES, Field.Index.NO));
+
+      doc.add(newField(r, "content4", "aaa bbb ccc ddd", Field.Store.NO, Field.Index.ANALYZED));
+      doc.add(newField(r, "content5", "aaa bbb ccc ddd", Field.Store.NO, Field.Index.NOT_ANALYZED));
+
+      doc.add(newField(r, "content7", "aaa bbb ccc ddd", Field.Store.NO, Field.Index.NOT_ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+
+      final Field idField = newField(r, "id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
+      doc.add(idField);
+
+      final long stopTime = System.currentTimeMillis() + 500;
+
+      do {
+        if (VERBOSE) {
+          System.out.println(Thread.currentThread().getName() + ": TEST: IndexerThread: cycle");
+        }
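+        // arm the testPoint failures for this thread (checked in MockIndexWriter.testPoint)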
+        doFail.set(this);
+        final String id = ""+r.nextInt(50);
+        idField.setValue(id);
+        Term idTerm = new Term("id", id);
+        try {
+          if (r.nextBoolean()) {
+            final List<Document> docs = new ArrayList<Document>();
+            final int count =  _TestUtil.nextInt(r, 1, 20);
+            for(int c=0;c<count;c++) {
+              docs.add(doc);
+            }
+            writer.updateDocuments(idTerm, docs);
+          } else {
+            writer.updateDocument(idTerm, doc);
+          }
+        } catch (RuntimeException re) {
+          if (VERBOSE) {
+            System.out.println(Thread.currentThread().getName() + ": EXC: ");
+            re.printStackTrace(System.out);
+          }
+          try {
+            _TestUtil.checkIndex(writer.getDirectory());
+          } catch (IOException ioe) {
+            System.out.println(Thread.currentThread().getName() + ": unexpected exception1");
+            ioe.printStackTrace(System.out);
+            failure = ioe;
+            break;
+          }
+        } catch (Throwable t) {
+          System.out.println(Thread.currentThread().getName() + ": unexpected exception2");
+          t.printStackTrace(System.out);
+          failure = t;
+          break;
+        }
+
+        doFail.set(null);
+
+        // After a possible exception (above) I should be able
+        // to add a new document without hitting an
+        // exception:
+        try {
+          writer.updateDocument(idTerm, doc);
+        } catch (Throwable t) {
+          System.out.println(Thread.currentThread().getName() + ": unexpected exception3");
+          t.printStackTrace(System.out);
+          failure = t;
+          break;
+        }
+      } while(System.currentTimeMillis() < stopTime);
+    }
+  }
+
+  ThreadLocal<Thread> doFail = new ThreadLocal<Thread>();
+
+  private class MockIndexWriter extends IndexWriter {
+    Random r = new Random(random.nextLong());
+
+    public MockIndexWriter(Directory dir, IndexWriterConfig conf) throws IOException {
+      super(dir, conf);
+    }
+
+    @Override
+    boolean testPoint(String name) {
+      if (doFail.get() != null && !name.equals("startDoFlush") && r.nextInt(40) == 17) {
+        if (VERBOSE) {
+          System.out.println(Thread.currentThread().getName() + ": NOW FAIL: " + name);
+          new Throwable().printStackTrace(System.out);
+        }
+        throw new RuntimeException(Thread.currentThread().getName() + ": intentionally failing at " + name);
+      }
+      return true;
+    }
+  }
+
+  public void testRandomExceptions() throws Throwable {
+    if (VERBOSE) {
+      System.out.println("\nTEST: start testRandomExceptions");
+    }
+    MockDirectoryWrapper dir = newDirectory();
+
+    MockAnalyzer analyzer = new MockAnalyzer(random);
+    analyzer.setEnableChecks(false); // disable workflow checking as we forcefully close() in exceptional cases.
+    MockIndexWriter writer  = new MockIndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, analyzer)
+        .setRAMBufferSizeMB(0.1).setMergeScheduler(new ConcurrentMergeScheduler()));
+    ((ConcurrentMergeScheduler) writer.getConfig().getMergeScheduler()).setSuppressExceptions();
+    //writer.setMaxBufferedDocs(10);
+    if (VERBOSE) {
+      System.out.println("TEST: initial commit");
+    }
+    writer.commit();
+
+    if (VERBOSE) {
+      writer.setInfoStream(System.out);
+    }
+
+    IndexerThread thread = new IndexerThread(0, writer);
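+    // note: run() rather than start(), so indexing happens in the current thread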
+    thread.run();
+    if (thread.failure != null) {
+      thread.failure.printStackTrace(System.out);
+      fail("thread " + thread.getName() + ": hit unexpected failure");
+    }
+
+    if (VERBOSE) {
+      System.out.println("TEST: commit after thread start");
+    }
+    writer.commit();
+
+    try {
+      writer.close();
+    } catch (Throwable t) {
+      System.out.println("exception during close:");
+      t.printStackTrace(System.out);
+      writer.rollback();
+    }
+
+    // Confirm that when doc hits exception partway through tokenization, it's deleted:
+    IndexReader r2 = IndexReader.open(dir, true);
+    final int count = r2.docFreq(new Term("content4", "aaa"));
+    final int count2 = r2.docFreq(new Term("content4", "ddd"));
+    assertEquals(count, count2);
+    r2.close();
+
+    dir.close();
+  }
+
+  public void testRandomExceptionsThreads() throws Throwable {
+    MockDirectoryWrapper dir = newDirectory();
+    MockAnalyzer analyzer = new MockAnalyzer(random);
+    analyzer.setEnableChecks(false); // disable workflow checking as we forcefully close() in exceptional cases.
+    MockIndexWriter writer  = new MockIndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, analyzer)
+        .setRAMBufferSizeMB(0.2).setMergeScheduler(new ConcurrentMergeScheduler()));
+    ((ConcurrentMergeScheduler) writer.getConfig().getMergeScheduler()).setSuppressExceptions();
+    //writer.setMaxBufferedDocs(10);
+    writer.commit();
+
+    if (VERBOSE) {
+      writer.setInfoStream(System.out);
+    }
+
+    final int NUM_THREADS = 4;
+
+    final IndexerThread[] threads = new IndexerThread[NUM_THREADS];
+    for(int i=0;i<NUM_THREADS;i++) {
+      threads[i] = new IndexerThread(i, writer);
+      threads[i].start();
+    }
+
+    for(int i=0;i<NUM_THREADS;i++)
+      threads[i].join();
+
+    for(int i=0;i<NUM_THREADS;i++)
+      if (threads[i].failure != null)
+        fail("thread " + threads[i].getName() + ": hit unexpected failure");
+
+    writer.commit();
+
+    try {
+      writer.close();
+    } catch (Throwable t) {
+      System.out.println("exception during close:");
+      t.printStackTrace(System.out);
+      writer.rollback();
+    }
+
+    // Confirm that when doc hits exception partway through tokenization, it's deleted:
+    IndexReader r2 = IndexReader.open(dir, true);
+    final int count = r2.docFreq(new Term("content4", "aaa"));
+    final int count2 = r2.docFreq(new Term("content4", "ddd"));
+    assertEquals(count, count2);
+    r2.close();
+
+    dir.close();
+  }
+  
+  // LUCENE-1198
+  private static final class MockIndexWriter2 extends IndexWriter {
+
+    public MockIndexWriter2(Directory dir, IndexWriterConfig conf) throws IOException {
+      super(dir, conf);
+    }
+
+    boolean doFail;
+
+    @Override
+    boolean testPoint(String name) {
+      if (doFail && name.equals("DocumentsWriter.ThreadState.init start"))
+        throw new RuntimeException("intentionally failing");
+      return true;
+    }
+  }
+  
+  private static String CRASH_FAIL_MESSAGE = "I'm experiencing problems";
+
+  private class CrashingFilter extends TokenFilter {
+    String fieldName;
+    int count;
+
+    public CrashingFilter(String fieldName, TokenStream input) {
+      super(input);
+      this.fieldName = fieldName;
+    }
+
+    @Override
+    public boolean incrementToken() throws IOException {
+      if (this.fieldName.equals("crash") && count++ >= 4)
+        throw new IOException(CRASH_FAIL_MESSAGE);
+      return input.incrementToken();
+    }
+
+    @Override
+    public void reset() throws IOException {
+      super.reset();
+      count = 0;
+    }
+  }
+
+  public void testExceptionDocumentsWriterInit() throws IOException {
+    Directory dir = newDirectory();
+    MockIndexWriter2 w = new MockIndexWriter2(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    w.setInfoStream(VERBOSE ? System.out : null);
+    Document doc = new Document();
+    doc.add(newField("field", "a field", Field.Store.YES,
+                      Field.Index.ANALYZED));
+    w.addDocument(doc);
+    w.doFail = true;
+    try {
+      w.addDocument(doc);
+      fail("did not hit exception");
+    } catch (RuntimeException re) {
+      // expected
+    }
+    w.close();
+    dir.close();
+  }
+
+  // LUCENE-1208
+  public void testExceptionJustBeforeFlush() throws IOException {
+    Directory dir = newDirectory();
+    MockIndexWriter w = new MockIndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(2));
+    w.setInfoStream(VERBOSE ? System.out : null);
+    Document doc = new Document();
+    doc.add(newField("field", "a field", Field.Store.YES,
+                      Field.Index.ANALYZED));
+    w.addDocument(doc);
+
+    Analyzer analyzer = new Analyzer() {
+      @Override
+      public TokenStream tokenStream(String fieldName, Reader reader) {
+        MockTokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
+        tokenizer.setEnableChecks(false); // disable workflow checking as we forcefully close() in exceptional cases.
+        return new CrashingFilter(fieldName, tokenizer);
+      }
+    };
+
+    Document crashDoc = new Document();
+    crashDoc.add(newField("crash", "do it on token 4", Field.Store.YES,
+                           Field.Index.ANALYZED));
+    try {
+      w.addDocument(crashDoc, analyzer);
+      fail("did not hit expected exception");
+    } catch (IOException ioe) {
+      // expected
+    }
+    w.addDocument(doc);
+    w.close();
+    dir.close();
+  }    
+
+  private static final class MockIndexWriter3 extends IndexWriter {
+
+    public MockIndexWriter3(Directory dir, IndexWriterConfig conf) throws IOException {
+      super(dir, conf);
+    }
+
+    boolean doFail;
+    boolean failed;
+
+    @Override
+    boolean testPoint(String name) {
+      if (doFail && name.equals("startMergeInit")) {
+        failed = true;
+        throw new RuntimeException("intentionally failing");
+      }
+      return true;
+    }
+  }
+  
+
+  // LUCENE-1210
+  public void testExceptionOnMergeInit() throws IOException {
+    Directory dir = newDirectory();
+    IndexWriterConfig conf = newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))
+      .setMaxBufferedDocs(2).setMergeScheduler(new ConcurrentMergeScheduler()).setMergePolicy(newLogMergePolicy());
+    ((LogMergePolicy) conf.getMergePolicy()).setMergeFactor(2);
+    MockIndexWriter3 w = new MockIndexWriter3(dir, conf);
+    w.doFail = true;
+    Document doc = new Document();
+    doc.add(newField("field", "a field", Field.Store.YES,
+                      Field.Index.ANALYZED));
+    for(int i=0;i<10;i++)
+      try {
+        w.addDocument(doc);
+      } catch (RuntimeException re) {
+        break;
+      }
+
+    ((ConcurrentMergeScheduler) w.getConfig().getMergeScheduler()).sync();
+    assertTrue(w.failed);
+    w.close();
+    dir.close();
+  }
+  
+  // LUCENE-1072
+  public void testExceptionFromTokenStream() throws IOException {
+    Directory dir = newDirectory();
+    IndexWriterConfig conf = newIndexWriterConfig( TEST_VERSION_CURRENT, new Analyzer() {
+
+      @Override
+      public TokenStream tokenStream(String fieldName, Reader reader) {
+        MockTokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.SIMPLE, true);
+        tokenizer.setEnableChecks(false); // disable workflow checking as we forcefully close() in exceptional cases.
+        return new TokenFilter(tokenizer) {
+          private int count = 0;
+
+          @Override
+          public boolean incrementToken() throws IOException {
+            if (count++ == 5) {
+              throw new IOException();
+            }
+            return input.incrementToken();
+          }
+        };
+      }
+
+    });
+    conf.setMaxBufferedDocs(Math.max(3, conf.getMaxBufferedDocs()));
+
+    IndexWriter writer = new IndexWriter(dir, conf);
+
+    Document doc = new Document();
+    String contents = "aa bb cc dd ee ff gg hh ii jj kk";
+    doc.add(newField("content", contents, Field.Store.NO,
+        Field.Index.ANALYZED));
+    try {
+      writer.addDocument(doc);
+      fail("did not hit expected exception");
+    } catch (Exception e) {
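+      // expected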
+    }
+
+    // Make sure we can add another normal document
+    doc = new Document();
+    doc.add(newField("content", "aa bb cc dd", Field.Store.NO,
+        Field.Index.ANALYZED));
+    writer.addDocument(doc);
+
+    // Make sure we can add another normal document
+    doc = new Document();
+    doc.add(newField("content", "aa bb cc dd", Field.Store.NO,
+        Field.Index.ANALYZED));
+    writer.addDocument(doc);
+
+    writer.close();
+    IndexReader reader = IndexReader.open(dir, true);
+    final Term t = new Term("content", "aa");
+    assertEquals(3, reader.docFreq(t));
+
+    // Make sure the doc that hit the exception was marked
+    // as deleted:
+    TermDocs tdocs = reader.termDocs(t);
+    int count = 0;
+    while(tdocs.next()) {
+      count++;
+    }
+    assertEquals(2, count);
+
+    assertEquals(0, reader.docFreq(new Term("content", "gg")));
+    reader.close();
+    dir.close();
+  }
+
+  private static class FailOnlyOnFlush extends MockDirectoryWrapper.Failure {
+    boolean doFail = false;
+    int count;
+
+    @Override
+    public void setDoFail() {
+      this.doFail = true;
+    }
+    @Override
+    public void clearDoFail() {
+      this.doFail = false;
+    }
+
+    @Override
+    public void eval(MockDirectoryWrapper dir)  throws IOException {
+      if (doFail) {
+        StackTraceElement[] trace = new Exception().getStackTrace();
+        boolean sawAppend = false;
+        boolean sawFlush = false;
+        for (int i = 0; i < trace.length; i++) {
+          if ("org.apache.lucene.index.FreqProxTermsWriter".equals(trace[i].getClassName()) && "appendPostings".equals(trace[i].getMethodName()))
+            sawAppend = true;
+          if ("doFlush".equals(trace[i].getMethodName()))
+            sawFlush = true;
+        }
+
+        if (sawAppend && sawFlush && count++ >= 30) {
+          doFail = false;
+          throw new IOException("now failing during flush");
+        }
+      }
+    }
+  }
+
+  // LUCENE-1072: make sure an errant exception on flushing
+  // one segment only takes out those docs in that one flush
+  public void testDocumentsWriterAbort() throws IOException {
+    MockDirectoryWrapper dir = newDirectory();
+    FailOnlyOnFlush failure = new FailOnlyOnFlush();
+    failure.setDoFail();
+    dir.failOn(failure);
+
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(2));
+    Document doc = new Document();
+    String contents = "aa bb cc dd ee ff gg hh ii jj kk";
+    doc.add(newField("content", contents, Field.Store.NO,
+        Field.Index.ANALYZED));
+    boolean hitError = false;
+    for(int i=0;i<200;i++) {
+      try {
+        writer.addDocument(doc);
+      } catch (IOException ioe) {
+        // only one flush should fail:
+        assertFalse(hitError);
+        hitError = true;
+      }
+    }
+    assertTrue(hitError);
+    writer.close();
+    IndexReader reader = IndexReader.open(dir, true);
+    assertEquals(198, reader.docFreq(new Term("content", "aa")));
+    reader.close();
+    dir.close();
+  }
+
+  public void testDocumentsWriterExceptions() throws IOException {
+    Analyzer analyzer = new Analyzer() {
+      @Override
+      public TokenStream tokenStream(String fieldName, Reader reader) {
+        MockTokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
+        tokenizer.setEnableChecks(false); // disable workflow checking as we forcefully close() in exceptional cases.
+        return new CrashingFilter(fieldName, tokenizer);
+      }
+    };
+
+    for(int i=0;i<2;i++) {
+      if (VERBOSE) {
+        System.out.println("TEST: cycle i=" + i);
+      }
+      MockDirectoryWrapper dir = newDirectory();
+      IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, analyzer).setMergePolicy(newLogMergePolicy()));
+      writer.setInfoStream(VERBOSE ? System.out : null);
+
+      // don't allow a sudden merge to clean up the deleted
+      // doc below:
+      LogMergePolicy lmp = (LogMergePolicy) writer.getConfig().getMergePolicy();
+      lmp.setMergeFactor(Math.max(lmp.getMergeFactor(), 5));
+
+      Document doc = new Document();
+      doc.add(newField("contents", "here are some contents", Field.Store.YES,
+                        Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+      writer.addDocument(doc);
+      writer.addDocument(doc);
+      doc.add(newField("crash", "this should crash after 4 terms", Field.Store.YES,
+                        Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+      doc.add(newField("other", "this will not get indexed", Field.Store.YES,
+                        Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+      try {
+        writer.addDocument(doc);
+        fail("did not hit expected exception");
+      } catch (IOException ioe) {
+        if (VERBOSE) {
+          System.out.println("TEST: hit expected exception");
+          ioe.printStackTrace(System.out);
+        }
+      }
+
+      if (0 == i) {
+        doc = new Document();
+        doc.add(newField("contents", "here are some contents", Field.Store.YES,
+                          Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+        writer.addDocument(doc);
+        writer.addDocument(doc);
+      }
+      writer.close();
+
+      if (VERBOSE) {
+        System.out.println("TEST: open reader");
+      }
+      IndexReader reader = IndexReader.open(dir, true);
+      if (i == 0) { 
+        int expected = 5;
+        assertEquals(expected, reader.docFreq(new Term("contents", "here")));
+        assertEquals(expected, reader.maxDoc());
+        int numDel = 0;
+        for(int j=0;j<reader.maxDoc();j++) {
+          if (reader.isDeleted(j))
+            numDel++;
+          else {
+            reader.document(j);
+            reader.getTermFreqVectors(j);
+          }
+        }
+        assertEquals(1, numDel);
+      }
+      reader.close();
+
+      writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT,
+          analyzer).setMaxBufferedDocs(10));
+      doc = new Document();
+      doc.add(newField("contents", "here are some contents", Field.Store.YES,
+                        Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+      for(int j=0;j<17;j++)
+        writer.addDocument(doc);
+      writer.optimize();
+      writer.close();
+
+      reader = IndexReader.open(dir, true);
+      int expected = 19+(1-i)*2;
+      assertEquals(expected, reader.docFreq(new Term("contents", "here")));
+      assertEquals(expected, reader.maxDoc());
+      int numDel = 0;
+      for(int j=0;j<reader.maxDoc();j++) {
+        if (reader.isDeleted(j))
+          numDel++;
+        else {
+          reader.document(j);
+          reader.getTermFreqVectors(j);
+        }
+      }
+      reader.close();
+      assertEquals(0, numDel);
+
+      dir.close();
+    }
+  }
+
+  public void testDocumentsWriterExceptionThreads() throws Exception {
+    Analyzer analyzer = new Analyzer() {
+      @Override
+      public TokenStream tokenStream(String fieldName, Reader reader) {
+        MockTokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
+        tokenizer.setEnableChecks(false); // disable workflow checking as we forcefully close() in exceptional cases.
+        return new CrashingFilter(fieldName, tokenizer);
+      }
+    };
+
+    final int NUM_THREAD = 3;
+    final int NUM_ITER = 100;
+
+    for(int i=0;i<2;i++) {
+      MockDirectoryWrapper dir = newDirectory();
+
+      {
+        final IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, analyzer).setMaxBufferedDocs(-1)
+                                                   .setMergePolicy(newLogMergePolicy(10)));
+        final int finalI = i;
+
+        Thread[] threads = new Thread[NUM_THREAD];
+        for(int t=0;t<NUM_THREAD;t++) {
+          threads[t] = new Thread() {
+              @Override
+              public void run() {
+                try {
+                  for(int iter=0;iter<NUM_ITER;iter++) {
+                    Document doc = new Document();
+                    doc.add(newField("contents", "here are some contents", Field.Store.YES,
+                                      Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+                    writer.addDocument(doc);
+                    writer.addDocument(doc);
+                    doc.add(newField("crash", "this should crash after 4 terms", Field.Store.YES,
+                                      Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+                    doc.add(newField("other", "this will not get indexed", Field.Store.YES,
+                                      Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+                    try {
+                      writer.addDocument(doc);
+                      fail("did not hit expected exception");
+                    } catch (IOException ioe) {
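+                      // expected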
+                    }
+
+                    if (0 == finalI) {
+                      doc = new Document();
+                      doc.add(newField("contents", "here are some contents", Field.Store.YES,
+                                        Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+                      writer.addDocument(doc);
+                      writer.addDocument(doc);
+                    }
+                  }
+                } catch (Throwable t) {
+                  synchronized(this) {
+                    System.out.println(Thread.currentThread().getName() + ": ERROR: hit unexpected exception");
+                    t.printStackTrace(System.out);
+                  }
+                  fail();
+                }
+              }
+            };
+          threads[t].start();
+        }
+
+        for(int t=0;t<NUM_THREAD;t++)
+          threads[t].join();
+            
+        writer.close();
+      }
+
+      IndexReader reader = IndexReader.open(dir, true);
+      int expected = (3+(1-i)*2)*NUM_THREAD*NUM_ITER;
+      assertEquals("i=" + i, expected, reader.docFreq(new Term("contents", "here")));
+      assertEquals(expected, reader.maxDoc());
+      int numDel = 0;
+      for(int j=0;j<reader.maxDoc();j++) {
+        if (reader.isDeleted(j))
+          numDel++;
+        else {
+          reader.document(j);
+          reader.getTermFreqVectors(j);
+        }
+      }
+      reader.close();
+
+      assertEquals(NUM_THREAD*NUM_ITER, numDel);
+
+      IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
+          TEST_VERSION_CURRENT, analyzer).setMaxBufferedDocs(10));
+      Document doc = new Document();
+      doc.add(newField("contents", "here are some contents", Field.Store.YES,
+                        Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+      for(int j=0;j<17;j++)
+        writer.addDocument(doc);
+      writer.optimize();
+      writer.close();
+
+      reader = IndexReader.open(dir, true);
+      expected += 17-NUM_THREAD*NUM_ITER;
+      assertEquals(expected, reader.docFreq(new Term("contents", "here")));
+      assertEquals(expected, reader.maxDoc());
+      numDel = 0;
+      for(int j=0;j<reader.maxDoc();j++) {
+        if (reader.isDeleted(j))
+          numDel++;
+        else {
+          reader.document(j);
+          reader.getTermFreqVectors(j);
+        }
+      }
+      reader.close();
+
+      dir.close();
+    }
+  }
+  
+  // Throws IOException during MockDirectoryWrapper.sync
+  private static class FailOnlyInSync extends MockDirectoryWrapper.Failure {
+    boolean didFail;
+    @Override
+    public void eval(MockDirectoryWrapper dir)  throws IOException {
+      if (doFail) {
+        StackTraceElement[] trace = new Exception().getStackTrace();
+        for (int i = 0; i < trace.length; i++) {
+          if (doFail && "org.apache.lucene.store.MockDirectoryWrapper".equals(trace[i].getClassName()) && "sync".equals(trace[i].getMethodName())) {
+            didFail = true;
+            throw new IOException("now failing on purpose during sync");
+          }
+        }
+      }
+    }
+  }
+  
+  // TODO: these are also in TestIndexWriter... add a simple doc-writing method
+  // like this to LuceneTestCase?
+  private void addDoc(IndexWriter writer) throws IOException
+  {
+      Document doc = new Document();
+      doc.add(newField("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
+      writer.addDocument(doc);
+  }
+  
+  // LUCENE-1044: test exception during sync
+  public void testExceptionDuringSync() throws IOException {
+    MockDirectoryWrapper dir = newDirectory();
+    FailOnlyInSync failure = new FailOnlyInSync();
+    dir.failOn(failure);
+
+    IndexWriter writer = new IndexWriter(
+        dir,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+            setMaxBufferedDocs(2).
+            setMergeScheduler(new ConcurrentMergeScheduler()).
+            setMergePolicy(newLogMergePolicy(5))
+    );
+    failure.setDoFail();
+    ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(5);
+
+    for (int i = 0; i < 23; i++) {
+      addDoc(writer);
+      if ((i-1)%2 == 0) {
+        try {
+          writer.commit();
+        } catch (IOException ioe) {
+          // expected
+        }
+      }
+    }
+
+    ((ConcurrentMergeScheduler) writer.getConfig().getMergeScheduler()).sync();
+    assertTrue(failure.didFail);
+    failure.clearDoFail();
+    writer.close();
+
+    IndexReader reader = IndexReader.open(dir, true);
+    assertEquals(23, reader.numDocs());
+    reader.close();
+    dir.close();
+  }
+  
+  private static class FailOnlyInCommit extends MockDirectoryWrapper.Failure {
+
+    boolean fail1, fail2;
+
+    @Override
+    public void eval(MockDirectoryWrapper dir)  throws IOException {
+      StackTraceElement[] trace = new Exception().getStackTrace();
+      boolean isCommit = false;
+      boolean isDelete = false;
+      for (int i = 0; i < trace.length; i++) {
+        if ("org.apache.lucene.index.SegmentInfos".equals(trace[i].getClassName()) && "prepareCommit".equals(trace[i].getMethodName()))
+          isCommit = true;
+        if ("org.apache.lucene.store.MockDirectoryWrapper".equals(trace[i].getClassName()) && "deleteFile".equals(trace[i].getMethodName()))
+          isDelete = true;
+      }
+
+      if (isCommit) {
+        if (!isDelete) {
+          fail1 = true;
+          throw new RuntimeException("now fail first");
+        } else {
+          fail2 = true;
+          throw new IOException("now fail during delete");
+        }
+      }
+    }
+  }
+  
+  // LUCENE-1214
+  public void testExceptionsDuringCommit() throws Throwable {
+    MockDirectoryWrapper dir = newDirectory();
+    FailOnlyInCommit failure = new FailOnlyInCommit();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    Document doc = new Document();
+    doc.add(newField("field", "a field", Field.Store.YES,
+                      Field.Index.ANALYZED));
+    w.addDocument(doc);
+    dir.failOn(failure);
+    try {
+      w.close();
+      fail();
+    } catch (IOException ioe) {
+      fail("expected only RuntimeException");
+    } catch (RuntimeException re) {
+      // Expected
+    }
+    assertTrue(failure.fail1 && failure.fail2);
+    w.rollback();
+    dir.close();
+  }
+  
+  public void testOptimizeExceptions() throws IOException {
+    Directory startDir = newDirectory();
+    IndexWriterConfig conf = newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(2).setMergePolicy(newLogMergePolicy());
+    ((LogMergePolicy) conf.getMergePolicy()).setMergeFactor(100);
+    IndexWriter w = new IndexWriter(startDir, conf);
+    for(int i=0;i<27;i++)
+      addDoc(w);
+    w.close();
+
+    int iter = TEST_NIGHTLY ? 200 : 20;
+    for(int i=0;i<iter;i++) {
+      if (VERBOSE) {
+        System.out.println("TEST: iter " + i);
+      }
+      MockDirectoryWrapper dir = new MockDirectoryWrapper(random, new RAMDirectory(startDir));
+      conf = newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergeScheduler(new ConcurrentMergeScheduler());
+      ((ConcurrentMergeScheduler) conf.getMergeScheduler()).setSuppressExceptions();
+      w = new IndexWriter(dir, conf);
+      w.setInfoStream(VERBOSE ? System.out : null);
+      dir.setRandomIOExceptionRate(0.5);
+      try {
+        w.optimize();
+      } catch (IOException ioe) {
+        if (ioe.getCause() == null)
+          fail("optimize threw IOException without root cause");
+      }
+      dir.setRandomIOExceptionRate(0);
+      w.close();
+      dir.close();
+    }
+    startDir.close();
+  }
+  
+  // LUCENE-1429
+  public void testOutOfMemoryErrorCausesCloseToFail() throws Exception {
+
+    final List<Throwable> thrown = new ArrayList<Throwable>();
+    final Directory dir = newDirectory();
+    final IndexWriter writer = new IndexWriter(dir,
+        newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))) {
+        @Override
+        public void message(final String message) {
+          if (message.startsWith("now flush at close") && 0 == thrown.size()) {
+            thrown.add(null);
+            throw new OutOfMemoryError("fake OOME at " + message);
+          }
+        }
+      };
+
+    // need to set an info stream so message is called
+    writer.setInfoStream(new PrintStream(new ByteArrayOutputStream()));
+    try {
+      writer.close();
+      fail("OutOfMemoryError expected");
+    }
+    catch (final OutOfMemoryError expected) {}
+
+    // throws IllegalStateEx w/o bug fix
+    writer.close();
+    dir.close();
+  }
+  
+  // LUCENE-1347
+  private static final class MockIndexWriter4 extends IndexWriter {
+
+    public MockIndexWriter4(Directory dir, IndexWriterConfig conf) throws IOException {
+      super(dir, conf);
+    }
+
+    boolean doFail;
+
+    @Override
+    boolean testPoint(String name) {
+      if (doFail && name.equals("rollback before checkpoint"))
+        throw new RuntimeException("intentionally failing");
+      return true;
+    }
+  }
+  
+  // LUCENE-1347
+  public void testRollbackExceptionHang() throws Throwable {
+    Directory dir = newDirectory();
+    MockIndexWriter4 w = new MockIndexWriter4(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+
+    addDoc(w);
+    w.doFail = true;
+    try {
+      w.rollback();
+      fail("did not hit intentional RuntimeException");
+    } catch (RuntimeException re) {
+      // expected
+    }
+    
+    w.doFail = false;
+    w.rollback();
+    dir.close();
+  }
+  
+  // LUCENE-1044: Simulate checksum error in segments_N
+  public void testSegmentsChecksumError() throws IOException {
+    Directory dir = newDirectory();
+
+    IndexWriter writer = null;
+
+    writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+
+    // add 100 documents
+    for (int i = 0; i < 100; i++) {
+      addDoc(writer);
+    }
+
+    // close
+    writer.close();
+
+    long gen = SegmentInfos.getCurrentSegmentGeneration(dir);
+    assertTrue("segment generation should be > 0 but got " + gen, gen > 0);
+
+    final String segmentsFileName = SegmentInfos.getCurrentSegmentFileName(dir);
+    IndexInput in = dir.openInput(segmentsFileName);
+    IndexOutput out = dir.createOutput(IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS, "", 1+gen));
+    out.copyBytes(in, in.length()-1);
+    byte b = in.readByte();
+    out.writeByte((byte) (1+b));
+    out.close();
+    in.close();
+
+    IndexReader reader = null;
+    try {
+      reader = IndexReader.open(dir, true);
+    } catch (IOException e) {
+      e.printStackTrace(System.out);
+      fail("segmentInfos failed to retry fallback to correct segments_N file");
+    }
+    reader.close();
+    dir.close();
+  }
+  
+  // Simulate a corrupt index by removing last byte of
+  // latest segments file and make sure we get an
+  // IOException trying to open the index:
+  public void testSimulatedCorruptIndex1() throws IOException {
+      MockDirectoryWrapper dir = newDirectory();
+      dir.setCheckIndexOnClose(false); // we are corrupting it!
+
+      IndexWriter writer = null;
+
+      writer  = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+
+      // add 100 documents
+      for (int i = 0; i < 100; i++) {
+          addDoc(writer);
+      }
+
+      // close
+      writer.close();
+
+      long gen = SegmentInfos.getCurrentSegmentGeneration(dir);
+      assertTrue("segment generation should be > 0 but got " + gen, gen > 0);
+
+      String fileNameIn = SegmentInfos.getCurrentSegmentFileName(dir);
+      String fileNameOut = IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS,
+                                                                 "",
+                                                                 1+gen);
+      IndexInput in = dir.openInput(fileNameIn);
+      IndexOutput out = dir.createOutput(fileNameOut);
+      long length = in.length();
+      for(int i=0;i<length-1;i++) {
+        out.writeByte(in.readByte());
+      }
+      in.close();
+      out.close();
+      dir.deleteFile(fileNameIn);
+
+      IndexReader reader = null;
+      try {
+        reader = IndexReader.open(dir, true);
+        fail("reader did not hit IOException on opening a corrupt index");
+      } catch (Exception e) {
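+        // expected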
+      }
+      if (reader != null) {
+        reader.close();
+      }
+      dir.close();
+  }
+  
+  // Simulate a corrupt index by removing one of the cfs
+  // files and make sure we get an IOException trying to
+  // open the index:
+  public void testSimulatedCorruptIndex2() throws IOException {
+      MockDirectoryWrapper dir = newDirectory();
+      dir.setCheckIndexOnClose(false); // we are corrupting it!
+      IndexWriter writer = null;
+
+      writer  = new IndexWriter(
+          dir,
+          newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+              setMergePolicy(newLogMergePolicy(true))
+      );
+      ((LogMergePolicy) writer.getConfig().getMergePolicy()).setNoCFSRatio(1.0);
+
+      // add 100 documents
+      for (int i = 0; i < 100; i++) {
+          addDoc(writer);
+      }
+
+      // close
+      writer.close();
+
+      long gen = SegmentInfos.getCurrentSegmentGeneration(dir);
+      assertTrue("segment generation should be > 0 but got " + gen, gen > 0);
+
+      String[] files = dir.listAll();
+      boolean corrupted = false;
+      for(int i=0;i<files.length;i++) {
+        if (files[i].endsWith(".cfs")) {
+          dir.deleteFile(files[i]);
+          corrupted = true;
+          break;
+        }
+      }
+      assertTrue("failed to find cfs file to remove", corrupted);
+
+      IndexReader reader = null;
+      try {
+        reader = IndexReader.open(dir, true);
+        fail("reader did not hit IOException on opening a corrupt index");
+      } catch (Exception e) {
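+        // expected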
+      }
+      if (reader != null) {
+        reader.close();
+      }
+      dir.close();
+  }
+  
+  // Simulate a writer that crashed while writing segments
+  // file: make sure we can still open the index (ie,
+  // gracefully fallback to the previous segments file),
+  // and that we can add to the index:
+  public void testSimulatedCrashedWriter() throws IOException {
+      MockDirectoryWrapper dir = newDirectory();
+      dir.setPreventDoubleWrite(false);
+
+      IndexWriter writer = null;
+
+      writer  = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+
+      // add 100 documents
+      for (int i = 0; i < 100; i++) {
+          addDoc(writer);
+      }
+
+      // close
+      writer.close();
+
+      long gen = SegmentInfos.getCurrentSegmentGeneration(dir);
+      assertTrue("segment generation should be > 0 but got " + gen, gen > 0);
+
+      // Make the next segments file, with last byte
+      // missing, to simulate a writer that crashed while
+      // writing segments file:
+      String fileNameIn = SegmentInfos.getCurrentSegmentFileName(dir);
+      String fileNameOut = IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS,
+                                                                 "",
+                                                                 1+gen);
+      IndexInput in = dir.openInput(fileNameIn);
+      IndexOutput out = dir.createOutput(fileNameOut);
+      long length = in.length();
+      for(int i=0;i<length-1;i++) {
+        out.writeByte(in.readByte());
+      }
+      in.close();
+      out.close();
+
+      IndexReader reader = null;
+      try {
+        reader = IndexReader.open(dir, true);
+      } catch (Exception e) {
+        fail("reader failed to open on a crashed index");
+      }
+      reader.close();
+
+      try {
+        writer  = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE));
+      } catch (Exception e) {
+        e.printStackTrace(System.out);
+        fail("writer failed to open on a crashed index");
+      }
+
+      // add 100 documents
+      for (int i = 0; i < 100; i++) {
+          addDoc(writer);
+      }
+
+      // close
+      writer.close();
+      dir.close();
+  }
+
+  public void testAddDocsNonAbortingException() throws Exception {
+    final Directory dir = newDirectory();
+    final RandomIndexWriter w = new RandomIndexWriter(random, dir);
+    final int numDocs1 = random.nextInt(25);
+    for(int docCount=0;docCount<numDocs1;docCount++) {
+      Document doc = new Document();
+      doc.add(newField("content", "good content", Field.Index.ANALYZED));
+      w.addDocument(doc);
+    }
+    
+    final List<Document> docs = new ArrayList<Document>();
+    for(int docCount=0;docCount<7;docCount++) {
+      Document doc = new Document();
+      docs.add(doc);
+      doc.add(newField("id", docCount+"", Field.Index.NOT_ANALYZED));
+      doc.add(newField("content", "silly content " + docCount, Field.Index.ANALYZED));
+      if (docCount == 4) {
+        Field f = newField("crash", "", Field.Index.ANALYZED);
+        doc.add(f);
+        MockTokenizer tokenizer = new MockTokenizer(new StringReader("crash me on the 4th token"), MockTokenizer.WHITESPACE, false);
+        tokenizer.setEnableChecks(false); // disable workflow checking as we forcefully close() in exceptional cases.
+        f.setTokenStream(new CrashingFilter("crash", tokenizer));
+      }
+    }
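+    // addDocuments adds the block atomically: the crash on the 5th doc (docCount == 4)
+    // should keep all 7 docs out of the index, verified by the "silly content"
+    // phrase query below returning zero hits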
+    try {
+      w.addDocuments(docs);
+      // BUG: CrashingFilter didn't throw the expected exception
+      fail("did not hit expected exception");
+    } catch (IOException ioe) {
+      // expected
+      assertEquals(CRASH_FAIL_MESSAGE, ioe.getMessage());
+    }
+
+    final int numDocs2 = random.nextInt(25);
+    for(int docCount=0;docCount<numDocs2;docCount++) {
+      Document doc = new Document();
+      doc.add(newField("content", "good content", Field.Index.ANALYZED));
+      w.addDocument(doc);
+    }
+
+    final IndexReader r = w.getReader();
+    w.close();
+
+    final IndexSearcher s = new IndexSearcher(r);
+    PhraseQuery pq = new PhraseQuery();
+    pq.add(new Term("content", "silly"));
+    pq.add(new Term("content", "content"));
+    assertEquals(0, s.search(pq, 1).totalHits);
+
+    pq = new PhraseQuery();
+    pq.add(new Term("content", "good"));
+    pq.add(new Term("content", "content"));
+    assertEquals(numDocs1+numDocs2, s.search(pq, 1).totalHits);
+    r.close();
+    dir.close();
+  }
+
+
+  public void testUpdateDocsNonAbortingException() throws Exception {
+    final Directory dir = newDirectory();
+    final RandomIndexWriter w = new RandomIndexWriter(random, dir);
+    final int numDocs1 = random.nextInt(25);
+    for(int docCount=0;docCount<numDocs1;docCount++) {
+      Document doc = new Document();
+      doc.add(newField("content", "good content", Field.Index.ANALYZED));
+      w.addDocument(doc);
+    }
+
+    // Use addDocs (no exception) to get docs in the index:
+    final List<Document> docs = new ArrayList<Document>();
+    final int numDocs2 = random.nextInt(25);
+    for(int docCount=0;docCount<numDocs2;docCount++) {
+      Document doc = new Document();
+      docs.add(doc);
+      doc.add(newField("subid", "subs", Field.Index.NOT_ANALYZED));
+      doc.add(newField("id", docCount+"", Field.Index.NOT_ANALYZED));
+      doc.add(newField("content", "silly content " + docCount, Field.Index.ANALYZED));
+    }
+    w.addDocuments(docs);
+
+    final int numDocs3 = random.nextInt(25);
+    for(int docCount=0;docCount<numDocs3;docCount++) {
+      Document doc = new Document();
+      doc.add(newField("content", "good content", Field.Index.ANALYZED));
+      w.addDocument(doc);
+    }
+
+    docs.clear();
+    final int limit = _TestUtil.nextInt(random, 2, 25);
+    final int crashAt = random.nextInt(limit);
+    for(int docCount=0;docCount<limit;docCount++) {
+      Document doc = new Document();
+      docs.add(doc);
+      doc.add(newField("id", docCount+"", Field.Index.NOT_ANALYZED));
+      doc.add(newField("content", "silly content " + docCount, Field.Index.ANALYZED));
+      if (docCount == crashAt) {
+        Field f = newField("crash", "", Field.Index.ANALYZED);
+        doc.add(f);
+        MockTokenizer tokenizer = new MockTokenizer(new StringReader("crash me on the 4th token"), MockTokenizer.WHITESPACE, false);
+        tokenizer.setEnableChecks(false); // disable workflow checking as we forcefully close() in exceptional cases.
+        f.setTokenStream(new CrashingFilter("crash", tokenizer));
+      }
+    }
+
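+    // the failed updateDocuments call must be atomic as well: the existing
+    // "subid"/"subs" block stays untouched and none of the new docs are added,
+    // so the "silly content" phrase query below still matches exactly numDocs2 docs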
+    try {
+      w.updateDocuments(new Term("subid", "subs"), docs);
+      // BUG: CrashingFilter didn't throw the expected exception
+      fail("did not hit expected exception");
+    } catch (IOException ioe) {
+      // expected
+      assertEquals(CRASH_FAIL_MESSAGE, ioe.getMessage());
+    }
+
+    final int numDocs4 = random.nextInt(25);
+    for(int docCount=0;docCount<numDocs4;docCount++) {
+      Document doc = new Document();
+      doc.add(newField("content", "good content", Field.Index.ANALYZED));
+      w.addDocument(doc);
+    }
+
+    final IndexReader r = w.getReader();
+    w.close();
+
+    final IndexSearcher s = new IndexSearcher(r);
+    PhraseQuery pq = new PhraseQuery();
+    pq.add(new Term("content", "silly"));
+    pq.add(new Term("content", "content"));
+    assertEquals(numDocs2, s.search(pq, 1).totalHits);
+
+    pq = new PhraseQuery();
+    pq.add(new Term("content", "good"));
+    pq.add(new Term("content", "content"));
+    assertEquals(numDocs1+numDocs3+numDocs4, s.search(pq, 1).totalHits);
+    r.close();
+    dir.close();
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestIndexWriterLockRelease.java b/lucene/backwards/src/test/org/apache/lucene/index/TestIndexWriterLockRelease.java
new file mode 100644
index 0000000..8de73df
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestIndexWriterLockRelease.java
@@ -0,0 +1,93 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.util._TestUtil;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.store.Directory;
+
+/**
+ * This tests the patch for issue #LUCENE-715 (IndexWriter does not
+ * release its write lock when trying to open an index which does not yet
+ * exist).
+ */
+
+public class TestIndexWriterLockRelease extends LuceneTestCase {
+    private java.io.File __test_dir;
+
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+        if (this.__test_dir == null) {
+            this.__test_dir = _TestUtil.getTempDir("testIndexWriter");
+
+            if (this.__test_dir.exists()) {
+                throw new IOException("test directory \"" + this.__test_dir.getPath() + "\" already exists (please remove by hand)");
+            }
+
+            if (!this.__test_dir.mkdirs()
+                && !this.__test_dir.isDirectory()) {
+                throw new IOException("unable to create test directory \"" + this.__test_dir.getPath() + "\"");
+            }
+        }
+    }
+
+    @Override
+    public void tearDown() throws Exception {
+        if (this.__test_dir != null) {
+            File[] files = this.__test_dir.listFiles();
+
+            for (int i = 0;
+                i < files.length;
+                ++i) {
+                if (!files[i].delete()) {
+                    throw new IOException("unable to remove file in test directory \"" + this.__test_dir.getPath() + "\" (please remove by hand)");
+                }
+            }
+
+            if (!this.__test_dir.delete()) {
+                throw new IOException("unable to remove test directory \"" + this.__test_dir.getPath() + "\" (please remove by hand)");
+            }
+        }
+        super.tearDown();
+    }
+
+    public void testIndexWriterLockRelease() throws IOException {
+        Directory dir = newFSDirectory(this.__test_dir);
+        try {
+          new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT,
+              new StandardAnalyzer(TEST_VERSION_CURRENT))
+          .setOpenMode(OpenMode.APPEND));
+        } catch (FileNotFoundException e) {
+            try {
+              new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT,
+                  new StandardAnalyzer(TEST_VERSION_CURRENT))
+              .setOpenMode(OpenMode.APPEND));
+            } catch (FileNotFoundException e1) {
+            }
+        } finally {
+          dir.close();
+        }
+    }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java b/lucene/backwards/src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java
new file mode 100755
index 0000000..04e5c27
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java
@@ -0,0 +1,263 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.store.Directory;
+
+import org.apache.lucene.util.LuceneTestCase;
+
+public class TestIndexWriterMergePolicy extends LuceneTestCase {
+  
+  // Test the normal case
+  public void testNormalCase() throws IOException {
+    Directory dir = newDirectory();
+
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
+        TEST_VERSION_CURRENT, new MockAnalyzer(random))
+        .setMaxBufferedDocs(10).setMergePolicy(new LogDocMergePolicy()));
+
+    for (int i = 0; i < 100; i++) {
+      addDoc(writer);
+      checkInvariants(writer);
+    }
+
+    writer.close();
+    dir.close();
+  }
+
+  // Test to see if there is over merge
+  public void testNoOverMerge() throws IOException {
+    Directory dir = newDirectory();
+
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
+        TEST_VERSION_CURRENT, new MockAnalyzer(random))
+        .setMaxBufferedDocs(10).setMergePolicy(new LogDocMergePolicy()));
+
+    boolean noOverMerge = false;
+    for (int i = 0; i < 100; i++) {
+      addDoc(writer);
+      checkInvariants(writer);
+      if (writer.getNumBufferedDocuments() + writer.getSegmentCount() >= 18) {
+        noOverMerge = true;
+      }
+    }
+    assertTrue(noOverMerge);
+
+    writer.close();
+    dir.close();
+  }
+
+  // Test the case where flush is forced after every addDoc
+  public void testForceFlush() throws IOException {
+    Directory dir = newDirectory();
+
+    LogDocMergePolicy mp = new LogDocMergePolicy();
+    mp.setMinMergeDocs(100);
+    mp.setMergeFactor(10);
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
+        TEST_VERSION_CURRENT, new MockAnalyzer(random))
+        .setMaxBufferedDocs(10).setMergePolicy(mp));
+
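+    // closing and reopening the writer after each addDoc forces a flush per document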
+    for (int i = 0; i < 100; i++) {
+      addDoc(writer);
+      writer.close();
+
+      mp = new LogDocMergePolicy();
+      mp.setMergeFactor(10);
+      writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT,
+          new MockAnalyzer(random)).setOpenMode(
+          OpenMode.APPEND).setMaxBufferedDocs(10).setMergePolicy(mp));
+      mp.setMinMergeDocs(100);
+      checkInvariants(writer);
+    }
+
+    writer.close();
+    dir.close();
+  }
+
+  // Test the case where mergeFactor changes
+  public void testMergeFactorChange() throws IOException {
+    Directory dir = newDirectory();
+
+    IndexWriter writer = new IndexWriter(
+        dir,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+            setMaxBufferedDocs(10).
+            setMergePolicy(newLogMergePolicy()).
+            setMergeScheduler(new SerialMergeScheduler())
+    );
+
+    writer.setInfoStream(VERBOSE ? System.out : null);
+
+    for (int i = 0; i < 250; i++) {
+      addDoc(writer);
+      checkInvariants(writer);
+    }
+
+    ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(5);
+
+    // merge policy only fixes segments on levels where merges
+    // have been triggered, so check invariants after all adds
+    for (int i = 0; i < 10; i++) {
+      addDoc(writer);
+    }
+    checkInvariants(writer);
+
+    writer.close();
+    dir.close();
+  }
+
+  // Test the case where both mergeFactor and maxBufferedDocs change
+  public void testMaxBufferedDocsChange() throws IOException {
+    Directory dir = newDirectory();
+
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
+        TEST_VERSION_CURRENT, new MockAnalyzer(random))
+        .setMaxBufferedDocs(101).setMergePolicy(new LogDocMergePolicy())
+        .setMergeScheduler(new SerialMergeScheduler()));
+
+    // leftmost* segment has 1 doc
+    // rightmost* segment has 100 docs
+    for (int i = 1; i <= 100; i++) {
+      for (int j = 0; j < i; j++) {
+        addDoc(writer);
+        checkInvariants(writer);
+      }
+      writer.close();
+
+      writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT,
+          new MockAnalyzer(random)).setOpenMode(
+          OpenMode.APPEND).setMaxBufferedDocs(101).setMergePolicy(new LogDocMergePolicy())
+                          .setMergeScheduler(new SerialMergeScheduler()));
+    }
+
+    writer.close();
+    LogDocMergePolicy ldmp = new LogDocMergePolicy();
+    ldmp.setMergeFactor(10);
+    writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT,
+        new MockAnalyzer(random)).setOpenMode(
+        OpenMode.APPEND).setMaxBufferedDocs(10).setMergePolicy(ldmp).setMergeScheduler(new SerialMergeScheduler()));
+
+    // merge policy only fixes segments on levels where merges
+    // have been triggered, so check invariants after all adds
+    for (int i = 0; i < 100; i++) {
+      addDoc(writer);
+    }
+    checkInvariants(writer);
+
+    for (int i = 100; i < 1000; i++) {
+      addDoc(writer);
+    }
+    writer.commit();
+    writer.waitForMerges();
+    writer.commit();
+    checkInvariants(writer);
+
+    writer.close();
+    dir.close();
+  }
+
+  // Test the case where a merge results in no doc at all
+  public void testMergeDocCount0() throws IOException {
+    Directory dir = newDirectory();
+
+    LogDocMergePolicy ldmp = new LogDocMergePolicy();
+    ldmp.setMergeFactor(100);
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
+        TEST_VERSION_CURRENT, new MockAnalyzer(random))
+        .setMaxBufferedDocs(10).setMergePolicy(ldmp));
+
+    for (int i = 0; i < 250; i++) {
+      addDoc(writer);
+      checkInvariants(writer);
+    }
+    writer.close();
+
+    IndexReader reader = IndexReader.open(dir, false);
+    reader.deleteDocuments(new Term("content", "aaa"));
+    reader.close();
+
+    ldmp = new LogDocMergePolicy();
+    ldmp.setMergeFactor(5);
+    writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT,
+        new MockAnalyzer(random)).setOpenMode(
+        OpenMode.APPEND).setMaxBufferedDocs(10).setMergePolicy(ldmp).setMergeScheduler(new ConcurrentMergeScheduler()));
+
+    // merge factor is changed, so check invariants after all adds
+    for (int i = 0; i < 10; i++) {
+      addDoc(writer);
+    }
+    writer.commit();
+    writer.waitForMerges();
+    writer.commit();
+    checkInvariants(writer);
+    assertEquals(10, writer.maxDoc());
+
+    writer.close();
+    dir.close();
+  }
+
+  private void addDoc(IndexWriter writer) throws IOException {
+    Document doc = new Document();
+    doc.add(newField("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
+    writer.addDocument(doc);
+  }
+
+  private void checkInvariants(IndexWriter writer) throws IOException {
+    writer.waitForMerges();
+    int maxBufferedDocs = writer.getConfig().getMaxBufferedDocs();
+    int mergeFactor = ((LogMergePolicy) writer.getConfig().getMergePolicy()).getMergeFactor();
+    int maxMergeDocs = ((LogMergePolicy) writer.getConfig().getMergePolicy()).getMaxMergeDocs();
+
+    int ramSegmentCount = writer.getNumBufferedDocuments();
+    assertTrue(ramSegmentCount < maxBufferedDocs);
+
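+    // walk segments from newest to oldest; within each doc-count level
+    // (lowerBound, upperBound] there must be fewer than mergeFactor segments,
+    // otherwise the merge policy should already have merged them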
+    int lowerBound = -1;
+    int upperBound = maxBufferedDocs;
+    int numSegments = 0;
+
+    int segmentCount = writer.getSegmentCount();
+    for (int i = segmentCount - 1; i >= 0; i--) {
+      int docCount = writer.getDocCount(i);
+      assertTrue("docCount=" + docCount + " lowerBound=" + lowerBound + " upperBound=" + upperBound + " i=" + i + " segmentCount=" + segmentCount + " index=" + writer.segString() + " config=" + writer.getConfig(), docCount > lowerBound);
+
+      if (docCount <= upperBound) {
+        numSegments++;
+      } else {
+        if (upperBound * mergeFactor <= maxMergeDocs) {
+          assertTrue("maxMergeDocs=" + maxMergeDocs + "; numSegments=" + numSegments + "; upperBound=" + upperBound + "; mergeFactor=" + mergeFactor + "; segs=" + writer.segString() + " config=" + writer.getConfig(), numSegments < mergeFactor);
+        }
+
+        do {
+          lowerBound = upperBound;
+          upperBound *= mergeFactor;
+        } while (docCount > upperBound);
+        numSegments = 1;
+      }
+    }
+    if (upperBound * mergeFactor <= maxMergeDocs) {
+      assertTrue(numSegments < mergeFactor);
+    }
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestIndexWriterMerging.java b/lucene/backwards/src/test/org/apache/lucene/index/TestIndexWriterMerging.java
new file mode 100644
index 0000000..e37019d
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestIndexWriterMerging.java
@@ -0,0 +1,303 @@
+package org.apache.lucene.index;
+/**
+ * Copyright 2006 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.Field.Index;
+import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.document.Field.TermVector;
+import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.util.LuceneTestCase;
+
+import java.io.IOException;
+import java.util.Random;
+
+
+public class TestIndexWriterMerging extends LuceneTestCase
+{
+
+  /**
+   * Tests that index merging (specifically addIndexes(Directory...)) doesn't
+   * change the index order of documents.
+   */
+  public void testLucene() throws IOException {
+    int num=100;
+
+    Directory indexA = newDirectory();
+    Directory indexB = newDirectory();
+
+    fillIndex(random, indexA, 0, num);
+    boolean fail = verifyIndex(indexA, 0);
+    if (fail)
+    {
+      fail("Index a is invalid");
+    }
+
+    fillIndex(random, indexB, num, num);
+    fail = verifyIndex(indexB, num);
+    if (fail)
+    {
+      fail("Index b is invalid");
+    }
+
+    Directory merged = newDirectory();
+
+    IndexWriter writer = new IndexWriter(
+        merged,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+            setMergePolicy(newLogMergePolicy(2))
+    );
+    writer.setInfoStream(VERBOSE ? System.out : null);
+    writer.addIndexes(new Directory[]{indexA, indexB});
+    writer.optimize();
+    writer.close();
+
+    fail = verifyIndex(merged, 0);
+
+    assertFalse("The merged index is invalid", fail);
+    indexA.close();
+    indexB.close();
+    merged.close();
+  }
+
+  private boolean verifyIndex(Directory directory, int startAt) throws IOException
+  {
+    boolean fail = false;
+    IndexReader reader = IndexReader.open(directory, true);
+
+    int max = reader.maxDoc();
+    for (int i = 0; i < max; i++)
+    {
+      Document temp = reader.document(i);
+      //System.out.println("doc "+i+"="+temp.getField("count").stringValue());
+      //compare the index doc number to the value that it should be
+      if (!temp.getField("count").stringValue().equals((i + startAt) + ""))
+      {
+        fail = true;
+        System.out.println("Document " + (i + startAt) + " is returning document " + temp.getField("count").stringValue());
+      }
+    }
+    reader.close();
+    return fail;
+  }
+
+  private void fillIndex(Random random, Directory dir, int start, int numDocs) throws IOException {
+
+    IndexWriter writer = new IndexWriter(
+        dir,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+            setOpenMode(OpenMode.CREATE).
+            setMaxBufferedDocs(2).
+            setMergePolicy(newLogMergePolicy(2))
+    );
+
+    for (int i = start; i < (start + numDocs); i++)
+    {
+      Document temp = new Document();
+      temp.add(newField("count", (""+i), Field.Store.YES, Field.Index.NOT_ANALYZED));
+
+      writer.addDocument(temp);
+    }
+    writer.close();
+  }
+  
+  // LUCENE-325: test expungeDeletes, when 2 singular merges
+  // are required
+  public void testExpungeDeletes() throws IOException {
+    Directory dir = newDirectory();
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
+        TEST_VERSION_CURRENT, new MockAnalyzer(random))
+        .setMaxBufferedDocs(2).setRAMBufferSizeMB(
+                                                  IndexWriterConfig.DISABLE_AUTO_FLUSH));
+    writer.setInfoStream(VERBOSE ? System.out : null);
+    Document document = new Document();
+
+    document = new Document();
+    Field storedField = newField("stored", "stored", Field.Store.YES,
+                                  Field.Index.NO);
+    document.add(storedField);
+    Field termVectorField = newField("termVector", "termVector",
+                                      Field.Store.NO, Field.Index.NOT_ANALYZED,
+                                      Field.TermVector.WITH_POSITIONS_OFFSETS);
+    document.add(termVectorField);
+    for(int i=0;i<10;i++)
+      writer.addDocument(document);
+    writer.close();
+
+    IndexReader ir = IndexReader.open(dir, false);
+    assertEquals(10, ir.maxDoc());
+    assertEquals(10, ir.numDocs());
+    ir.deleteDocument(0);
+    ir.deleteDocument(7);
+    assertEquals(8, ir.numDocs());
+    ir.close();
+
+    writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
+    assertEquals(8, writer.numDocs());
+    assertEquals(10, writer.maxDoc());
+    writer.expungeDeletes();
+    assertEquals(8, writer.numDocs());
+    writer.close();
+    ir = IndexReader.open(dir, true);
+    assertEquals(8, ir.maxDoc());
+    assertEquals(8, ir.numDocs());
+    ir.close();
+    dir.close();
+  }
+
+  // LUCENE-325: test expungeDeletes, when many adjacent merges are required
+  public void testExpungeDeletes2() throws IOException {
+    Directory dir = newDirectory();
+    IndexWriter writer = new IndexWriter(
+        dir,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+            setMaxBufferedDocs(2).
+            setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH).
+            setMergePolicy(newLogMergePolicy(50))
+    );
+
+    Document document = new Document();
+
+    document = new Document();
+    Field storedField = newField("stored", "stored", Store.YES,
+                                  Index.NO);
+    document.add(storedField);
+    Field termVectorField = newField("termVector", "termVector",
+                                      Store.NO, Index.NOT_ANALYZED,
+                                      TermVector.WITH_POSITIONS_OFFSETS);
+    document.add(termVectorField);
+    for(int i=0;i<98;i++)
+      writer.addDocument(document);
+    writer.close();
+
+    IndexReader ir = IndexReader.open(dir, false);
+    assertEquals(98, ir.maxDoc());
+    assertEquals(98, ir.numDocs());
+    for(int i=0;i<98;i+=2)
+      ir.deleteDocument(i);
+    assertEquals(49, ir.numDocs());
+    ir.close();
+
+    writer = new IndexWriter(
+        dir,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+            setMergePolicy(newLogMergePolicy(3))
+    );
+    assertEquals(49, writer.numDocs());
+    writer.expungeDeletes();
+    writer.close();
+    ir = IndexReader.open(dir, true);
+    assertEquals(49, ir.maxDoc());
+    assertEquals(49, ir.numDocs());
+    ir.close();
+    dir.close();
+  }
+
+  // LUCENE-325: test expungeDeletes without waiting, when
+  // many adjacent merges are required
+  public void testExpungeDeletes3() throws IOException {
+    Directory dir = newDirectory();
+    IndexWriter writer = new IndexWriter(
+        dir,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+            setMaxBufferedDocs(2).
+            setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH).
+            setMergePolicy(newLogMergePolicy(50))
+    );
+
+    Document document = new Document();
+
+    document = new Document();
+    Field storedField = newField("stored", "stored", Field.Store.YES,
+                                  Field.Index.NO);
+    document.add(storedField);
+    Field termVectorField = newField("termVector", "termVector",
+                                      Field.Store.NO, Field.Index.NOT_ANALYZED,
+                                      Field.TermVector.WITH_POSITIONS_OFFSETS);
+    document.add(termVectorField);
+    for(int i=0;i<98;i++)
+      writer.addDocument(document);
+    writer.close();
+
+    IndexReader ir = IndexReader.open(dir, false);
+    assertEquals(98, ir.maxDoc());
+    assertEquals(98, ir.numDocs());
+    for(int i=0;i<98;i+=2)
+      ir.deleteDocument(i);
+    assertEquals(49, ir.numDocs());
+    ir.close();
+
+    writer = new IndexWriter(
+        dir,
+        newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+            setMergePolicy(newLogMergePolicy(3))
+    );
+    writer.expungeDeletes(false);
+    writer.close();
+    ir = IndexReader.open(dir, true);
+    assertEquals(49, ir.maxDoc());
+    assertEquals(49, ir.numDocs());
+    ir.close();
+    dir.close();
+  }
+  
+  // Just intercepts all merges & verifies that we are never
+  // merging a segment with >= 20 (maxMergeDocs) docs
+  private class MyMergeScheduler extends MergeScheduler {
+    @Override
+    synchronized public void merge(IndexWriter writer)
+      throws CorruptIndexException, IOException {
+
+      while(true) {
+        MergePolicy.OneMerge merge = writer.getNextMerge();
+        if (merge == null) {
+          break;
+        }
+        for(int i=0;i<merge.segments.size();i++) {
+          assert merge.segments.get(i).docCount < 20;
+        }
+        writer.merge(merge);
+      }
+    }
+
+    @Override
+    public void close() {}
+  }
+
+  // LUCENE-1013
+  public void testSetMaxMergeDocs() throws IOException {
+    Directory dir = newDirectory();
+    IndexWriterConfig conf = newIndexWriterConfig(
+        TEST_VERSION_CURRENT, new MockAnalyzer(random))
+      .setMergeScheduler(new MyMergeScheduler()).setMaxBufferedDocs(2).setMergePolicy(newLogMergePolicy());
+    LogMergePolicy lmp = (LogMergePolicy) conf.getMergePolicy();
+    lmp.setMaxMergeDocs(20);
+    lmp.setMergeFactor(2);
+    IndexWriter iw = new IndexWriter(dir, conf);
+    iw.setInfoStream(VERBOSE ? System.out : null);
+    Document document = new Document();
+    document.add(newField("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
+                           Field.TermVector.YES));
+    for(int i=0;i<177;i++)
+      iw.addDocument(document);
+    iw.close();
+    dir.close();
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java b/lucene/backwards/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java
new file mode 100644
index 0000000..6071017
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java
@@ -0,0 +1,555 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.MockDirectoryWrapper;
+import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util._TestUtil;
+
+import static org.apache.lucene.index.TestIndexWriter.assertNoUnreferencedFiles;
+
+/**
+ * Tests for IndexWriter when the disk runs out of space
+ */
+public class TestIndexWriterOnDiskFull extends LuceneTestCase {
+
+  /*
+   * Make sure IndexWriter cleans up on hitting a disk
+   * full exception in addDocument.
+   * TODO: how to do this on windows with FSDirectory?
+   */
+  public void testAddDocumentOnDiskFull() throws IOException {
+
+    for(int pass=0;pass<2;pass++) {
+      if (VERBOSE) {
+        System.out.println("TEST: pass=" + pass);
+      }
+      boolean doAbort = pass == 1;
+      long diskFree = _TestUtil.nextInt(random, 100, 300);
+      while(true) {
+        if (VERBOSE) {
+          System.out.println("TEST: cycle: diskFree=" + diskFree);
+        }
+        MockDirectoryWrapper dir = new MockDirectoryWrapper(random, new RAMDirectory());
+        dir.setMaxSizeInBytes(diskFree);
+        IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+        writer.setInfoStream(VERBOSE ? System.out : null);
+        MergeScheduler ms = writer.getConfig().getMergeScheduler();
+        if (ms instanceof ConcurrentMergeScheduler) {
+          // This test intentionally produces exceptions
+          // in the threads that CMS launches; we don't
+          // want to pollute test output with these.
+          ((ConcurrentMergeScheduler) ms).setSuppressExceptions();
+        }
+
+        boolean hitError = false;
+        try {
+          for(int i=0;i<200;i++) {
+            addDoc(writer);
+          }
+          if (VERBOSE) {
+            System.out.println("TEST: done adding docs; now commit");
+          }
+          writer.commit();
+        } catch (IOException e) {
+          if (VERBOSE) {
+            System.out.println("TEST: exception on addDoc");
+            e.printStackTrace(System.out);
+          }
+          hitError = true;
+        }
+
+        if (hitError) {
+          if (doAbort) {
+            if (VERBOSE) {
+              System.out.println("TEST: now rollback");
+            }
+            writer.rollback();
+          } else {
+            try {
+              if (VERBOSE) {
+                System.out.println("TEST: now close");
+              }
+              writer.close();
+            } catch (IOException e) {
+              if (VERBOSE) {
+                System.out.println("TEST: exception on close; retry w/ no disk space limit");
+                e.printStackTrace(System.out);
+              }
+              dir.setMaxSizeInBytes(0);
+              writer.close();
+            }
+          }
+
+          //_TestUtil.syncConcurrentMerges(ms);
+
+          if (_TestUtil.anyFilesExceptWriteLock(dir)) {
+            assertNoUnreferencedFiles(dir, "after disk full during addDocument");
+            
+            // Make sure reader can open the index:
+            IndexReader.open(dir, true).close();
+          }
+            
+          dir.close();
+          // Now try again w/ more space:
+
+          diskFree += TEST_NIGHTLY ? _TestUtil.nextInt(random, 400, 600) : _TestUtil.nextInt(random, 3000, 5000);
+        } else {
+          //_TestUtil.syncConcurrentMerges(writer);
+          dir.setMaxSizeInBytes(0);
+          writer.close();
+          dir.close();
+          break;
+        }
+      }
+    }
+  }
+
+  // TODO: make @Nightly variant that provokes more disk
+  // fulls
+
+  // TODO: have test fail if on any given top
+  // iter there was not a single IOE hit
+
+  /*
+  Test: make sure when we run out of disk space or hit
+  random IOExceptions in any of the addIndexes(*) calls
+  that 1) index is not corrupt (searcher can open/search
+  it) and 2) transactional semantics are followed:
+  either all or none of the incoming documents were in
+  fact added.
+   */
+  public void testAddIndexOnDiskFull() throws IOException
+  {
+    int START_COUNT = 57;
+    int NUM_DIR = 50;
+    int END_COUNT = START_COUNT + NUM_DIR*25;
+    
+    // Build up a bunch of dirs that have indexes which we
+    // will then merge together by calling addIndexes(*):
+    Directory[] dirs = new Directory[NUM_DIR];
+    long inputDiskUsage = 0;
+    for(int i=0;i<NUM_DIR;i++) {
+      dirs[i] = newDirectory();
+      IndexWriter writer  = new IndexWriter(dirs[i], newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+      for(int j=0;j<25;j++) {
+        addDocWithIndex(writer, 25*i+j);
+      }
+      writer.close();
+      String[] files = dirs[i].listAll();
+      for(int j=0;j<files.length;j++) {
+        inputDiskUsage += dirs[i].fileLength(files[j]);
+      }
+    }
+    
+    // Now, build a starting index that has START_COUNT docs.  We
+    // will then try to addIndexesNoOptimize into a copy of this:
+    MockDirectoryWrapper startDir = newDirectory();
+    IndexWriter writer = new IndexWriter(startDir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    for(int j=0;j<START_COUNT;j++) {
+      addDocWithIndex(writer, j);
+    }
+    writer.close();
+    
+    // Make sure starting index seems to be working properly:
+    Term searchTerm = new Term("content", "aaa");        
+    IndexReader reader = IndexReader.open(startDir, true);
+    assertEquals("first docFreq", 57, reader.docFreq(searchTerm));
+    
+    IndexSearcher searcher = newSearcher(reader);
+    ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
+    assertEquals("first number of hits", 57, hits.length);
+    searcher.close();
+    reader.close();
+    
+    // Iterate with larger and larger amounts of free
+    // disk space.  With little free disk space,
+    // addIndexes will certainly run out of space &
+    // fail.  Verify that when this happens, index is
+    // not corrupt and index in fact has added no
+    // documents.  Then, we increase disk space by 2000
+    // bytes each iteration.  At some point there is
+    // enough free disk space and addIndexes should
+    // succeed and index should show all documents were
+    // added.
+    
+    // String[] files = startDir.listAll();
+    long diskUsage = startDir.sizeInBytes();
+    
+    long startDiskUsage = 0;
+    String[] files = startDir.listAll();
+    for(int i=0;i<files.length;i++) {
+      startDiskUsage += startDir.fileLength(files[i]);
+    }
+    
+    for(int iter=0;iter<3;iter++) {
+      
+      if (VERBOSE)
+        System.out.println("TEST: iter=" + iter);
+      
+      // Start with 100 bytes more than we are currently using:
+      long diskFree = diskUsage+_TestUtil.nextInt(random, 50, 200);
+      
+      int method = iter;
+      
+      boolean success = false;
+      boolean done = false;
+      
+      String methodName;
+      if (0 == method) {
+        methodName = "addIndexes(Directory[]) + optimize()";
+      } else if (1 == method) {
+        methodName = "addIndexes(IndexReader[])";
+      } else {
+        methodName = "addIndexes(Directory[])";
+      }
+      
+      while(!done) {
+        if (VERBOSE) {
+          System.out.println("TEST: cycle...");
+        }
+        
+        // Make a new dir that will enforce disk usage:
+        MockDirectoryWrapper dir = new MockDirectoryWrapper(random, new RAMDirectory(startDir));
+        writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND).setMergePolicy(newLogMergePolicy()));
+        IOException err = null;
+        writer.setInfoStream(VERBOSE ? System.out : null);
+
+        MergeScheduler ms = writer.getConfig().getMergeScheduler();
+        for(int x=0;x<2;x++) {
+          if (ms instanceof ConcurrentMergeScheduler)
+            // This test intentionally produces exceptions
+            // in the threads that CMS launches; we don't
+            // want to pollute test output with these.
+            if (0 == x)
+              ((ConcurrentMergeScheduler) ms).setSuppressExceptions();
+            else
+              ((ConcurrentMergeScheduler) ms).clearSuppressExceptions();
+          
+          // Two loops: first time, limit disk space &
+          // throw random IOExceptions; second time, no
+          // disk space limit:
+          
+          double rate = 0.05;
+          double diskRatio = ((double) diskFree)/diskUsage;
+          long thisDiskFree;
+          
+          String testName = null;
+          
+          if (0 == x) {
+            thisDiskFree = diskFree;
+            if (diskRatio >= 2.0) {
+              rate /= 2;
+            }
+            if (diskRatio >= 4.0) {
+              rate /= 2;
+            }
+            if (diskRatio >= 6.0) {
+              rate = 0.0;
+            }
+            if (VERBOSE)
+              testName = "disk full test " + methodName + " with disk full at " + diskFree + " bytes";
+          } else {
+            thisDiskFree = 0;
+            rate = 0.0;
+            if (VERBOSE)
+              testName = "disk full test " + methodName + " with unlimited disk space";
+          }
+          
+          if (VERBOSE)
+            System.out.println("\ncycle: " + testName);
+          
+          dir.setTrackDiskUsage(true);
+          dir.setMaxSizeInBytes(thisDiskFree);
+          dir.setRandomIOExceptionRate(rate);
+          
+          try {
+            
+            if (0 == method) {
+              writer.addIndexes(dirs);
+              writer.optimize();
+            } else if (1 == method) {
+              IndexReader readers[] = new IndexReader[dirs.length];
+              for(int i=0;i<dirs.length;i++) {
+                readers[i] = IndexReader.open(dirs[i], true);
+              }
+              try {
+                writer.addIndexes(readers);
+              } finally {
+                for(int i=0;i<dirs.length;i++) {
+                  readers[i].close();
+                }
+              }
+            } else {
+              writer.addIndexes(dirs);
+            }
+            
+            success = true;
+            if (VERBOSE) {
+              System.out.println("  success!");
+            }
+            
+            if (0 == x) {
+              done = true;
+            }
+            
+          } catch (IOException e) {
+            success = false;
+            err = e;
+            if (VERBOSE) {
+              System.out.println("  hit IOException: " + e);
+              e.printStackTrace(System.out);
+            }
+            
+            if (1 == x) {
+              e.printStackTrace(System.out);
+              fail(methodName + " hit IOException after disk space was freed up");
+            }
+          }
+          
+          // Make sure all threads from
+          // ConcurrentMergeScheduler are done
+          _TestUtil.syncConcurrentMerges(writer);
+          
+          if (VERBOSE) {
+            System.out.println("  now test readers");
+          }
+          
+          // Finally, verify index is not corrupt, and, if
+          // we succeeded, we see all docs added, and if we
+          // failed, we see either all docs or no docs added
+          // (transactional semantics):
+          try {
+            reader = IndexReader.open(dir, true);
+          } catch (IOException e) {
+            e.printStackTrace(System.out);
+            fail(testName + ": exception when creating IndexReader: " + e);
+          }
+          int result = reader.docFreq(searchTerm);
+          if (success) {
+            if (result != START_COUNT) {
+              fail(testName + ": method did not throw exception but docFreq('aaa') is " + result + " instead of expected " + START_COUNT);
+            }
+          } else {
+            // On hitting exception we still may have added
+            // all docs:
+            if (result != START_COUNT && result != END_COUNT) {
+              err.printStackTrace(System.out);
+              fail(testName + ": method did throw exception but docFreq('aaa') is " + result + " instead of expected " + START_COUNT + " or " + END_COUNT);
+            }
+          }
+          
+          searcher = newSearcher(reader);
+          try {
+            hits = searcher.search(new TermQuery(searchTerm), null, END_COUNT).scoreDocs;
+          } catch (IOException e) {
+            e.printStackTrace(System.out);
+            fail(testName + ": exception when searching: " + e);
+          }
+          int result2 = hits.length;
+          if (success) {
+            if (result2 != result) {
+              fail(testName + ": method did not throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + result);
+            }
+          } else {
+            // On hitting exception we still may have added
+            // all docs:
+            if (result2 != result) {
+              err.printStackTrace(System.out);
+              fail(testName + ": method did throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + result);
+            }
+          }
+          
+          searcher.close();
+          reader.close();
+          if (VERBOSE) {
+            System.out.println("  count is " + result);
+          }
+          
+          if (done || result == END_COUNT) {
+            break;
+          }
+        }
+        
+        if (VERBOSE) {
+          System.out.println("  start disk = " + startDiskUsage + "; input disk = " + inputDiskUsage + "; max used = " + dir.getMaxUsedSizeInBytes());
+        }
+        
+        if (done) {
+          // Javadocs state that temp free Directory space
+          // required is at most 2X total input size of
+          // indices so let's make sure:
+          assertTrue("max free Directory space required exceeded 1X the total input index sizes during " + methodName +
+                     ": max temp usage = " + (dir.getMaxUsedSizeInBytes()-startDiskUsage) + " bytes vs limit=" + (2*(startDiskUsage + inputDiskUsage)) +
+                     "; starting disk usage = " + startDiskUsage + " bytes; " +
+                     "input index disk usage = " + inputDiskUsage + " bytes",
+                     (dir.getMaxUsedSizeInBytes()-startDiskUsage) < 2*(startDiskUsage + inputDiskUsage));
+        }
+        
+        // Make sure we don't hit disk full during close below:
+        dir.setMaxSizeInBytes(0);
+        dir.setRandomIOExceptionRate(0.0);
+        
+        writer.close();
+        
+        // Wait for all BG threads to finish else
+        // dir.close() will throw IOException because
+        // there are still open files
+        _TestUtil.syncConcurrentMerges(ms);
+        
+        dir.close();
+        
+        // Try again with more free space:
+        diskFree += TEST_NIGHTLY ? _TestUtil.nextInt(random, 4000, 8000) : _TestUtil.nextInt(random, 40000, 80000);
+      }
+    }
+    
+    startDir.close();
+    for (Directory dir : dirs)
+      dir.close();
+  }
+  
+  private static class FailTwiceDuringMerge extends MockDirectoryWrapper.Failure {
+    public boolean didFail1;
+    public boolean didFail2;
+
+    @Override
+    public void eval(MockDirectoryWrapper dir)  throws IOException {
+      if (!doFail) {
+        return;
+      }
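+      // inspect the call stack so the fake disk-full is thrown exactly once
+      // inside SegmentMerger.mergeTerms and once inside BitVector.write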
+      StackTraceElement[] trace = new Exception().getStackTrace();
+      for (int i = 0; i < trace.length; i++) {
+        if ("org.apache.lucene.index.SegmentMerger".equals(trace[i].getClassName()) && "mergeTerms".equals(trace[i].getMethodName()) && !didFail1) {
+          didFail1 = true;
+          throw new IOException("fake disk full during mergeTerms");
+        }
+        if ("org.apache.lucene.util.BitVector".equals(trace[i].getClassName()) && "write".equals(trace[i].getMethodName()) && !didFail2) {
+          didFail2 = true;
+          throw new IOException("fake disk full while writing BitVector");
+        }
+      }
+    }
+  }
+  
+  // LUCENE-2593
+  public void testCorruptionAfterDiskFullDuringMerge() throws IOException {
+    MockDirectoryWrapper dir = newDirectory();
+    //IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setReaderPooling(true));
+    IndexWriter w = new IndexWriter(
+        dir,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+            setMergeScheduler(new SerialMergeScheduler()).
+            setReaderPooling(true).
+            setMergePolicy(newLogMergePolicy(2))
+    );
+
+    _TestUtil.keepFullyDeletedSegments(w);
+
+    ((LogMergePolicy) w.getMergePolicy()).setMergeFactor(2);
+
+    Document doc = new Document();
+    doc.add(newField("f", "doctor who", Field.Store.YES, Field.Index.ANALYZED));
+    w.addDocument(doc);
+    w.commit();
+
+    w.deleteDocuments(new Term("f", "who"));
+    w.addDocument(doc);
+    
+    // disk fills up!
+    FailTwiceDuringMerge ftdm = new FailTwiceDuringMerge();
+    ftdm.setDoFail();
+    dir.failOn(ftdm);
+
+    try {
+      w.commit();
+      fail("fake disk full IOExceptions not hit");
+    } catch (IOException ioe) {
+      // expected
+      assertTrue(ftdm.didFail1 || ftdm.didFail2);
+    }
+    _TestUtil.checkIndex(dir);
+    ftdm.clearDoFail();
+    w.addDocument(doc);
+    w.close();
+
+    dir.close();
+  }
+  
+  // LUCENE-1130: make sure immediate disk full on creating
+  // an IndexWriter (hit during DW.ThreadState.init()) is
+  // OK:
+  public void testImmediateDiskFull() throws IOException {
+    MockDirectoryWrapper dir = newDirectory();
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))
+        .setMaxBufferedDocs(2).setMergeScheduler(new ConcurrentMergeScheduler()));
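+    // cap the directory at its current size so the very next write hits "disk full"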
+    dir.setMaxSizeInBytes(Math.max(1, dir.getRecomputedActualSizeInBytes()));
+    final Document doc = new Document();
+    doc.add(newField("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+    try {
+      writer.addDocument(doc);
+      fail("did not hit disk full");
+    } catch (IOException ioe) {
+    }
+    // Without fix for LUCENE-1130: this call will hang:
+    try {
+      writer.addDocument(doc);
+      fail("did not hit disk full");
+    } catch (IOException ioe) {
+    }
+    try {
+      writer.close(false);
+      fail("did not hit disk full");
+    } catch (IOException ioe) {
+    }
+
+    // Make sure once disk space is avail again, we can
+    // cleanly close:
+    dir.setMaxSizeInBytes(0);
+    writer.close(false);
+    dir.close();
+  }
+  
+  // TODO: these are also in TestIndexWriter... add a simple doc-writing method
+  // like this to LuceneTestCase?
+  private void addDoc(IndexWriter writer) throws IOException
+  {
+      Document doc = new Document();
+      doc.add(newField("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
+      writer.addDocument(doc);
+  }
+  
+  private void addDocWithIndex(IndexWriter writer, int index) throws IOException
+  {
+      Document doc = new Document();
+      doc.add(newField("content", "aaa " + index, Field.Store.YES, Field.Index.ANALYZED));
+      doc.add(newField("id", "" + index, Field.Store.YES, Field.Index.ANALYZED));
+      writer.addDocument(doc);
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestIndexWriterOnJRECrash.java b/lucene/backwards/src/test/org/apache/lucene/index/TestIndexWriterOnJRECrash.java
new file mode 100644
index 0000000..4950436
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestIndexWriterOnJRECrash.java
@@ -0,0 +1,162 @@
+package org.apache.lucene.index;
+
+/**
+ *  Licensed to the Apache Software Foundation (ASF) under one or more
+ *  contributor license agreements.  See the NOTICE file distributed with
+ *  this work for additional information regarding copyright ownership.
+ *  The ASF licenses this file to You under the Apache License, Version 2.0
+ *  (the "License"); you may not use this file except in compliance with
+ *  the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ *
+ */
+
+import java.io.BufferedInputStream;
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStream;
+import java.lang.reflect.Field;
+import java.lang.reflect.Method;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.MockDirectoryWrapper;
+import org.apache.lucene.util.Constants;
+import org.apache.lucene.util._TestUtil;
+
+/**
+ * Runs TestNRTThreads in a separate process, crashes the JRE in the middle
+ * of execution, then runs CheckIndex to make sure it's not corrupt.
+ */
+public class TestIndexWriterOnJRECrash extends TestNRTThreads {
+  private File tempDir;
+  
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    tempDir = _TestUtil.getTempDir("jrecrash");
+    tempDir.delete();
+    tempDir.mkdir();
+  }
+  
+  @Override
+  public void testNRTThreads() throws Exception {
+    String vendor = Constants.JAVA_VENDOR;
+    assumeTrue(vendor + " JRE not supported.", 
+        vendor.startsWith("Oracle") || vendor.startsWith("Sun") || vendor.startsWith("Apple"));
+    
+    // if we are not the fork
+    if (System.getProperty("tests.crashmode") == null) {
+      // try up to 10 times to create an index
+      for (int i = 0; i < 10; i++) {
+        forkTest();
+        // if we succeeded in finding an index, we are done.
+        if (checkIndexes(tempDir))
+          return;
+      }
+    } else {
+      // we are the fork, setup a crashing thread
+      final int crashTime = _TestUtil.nextInt(random, 3000, 4000);
+      Thread t = new Thread() {
+        @Override
+        public void run() {
+          try {
+            Thread.sleep(crashTime);
+          } catch (InterruptedException e) {}
+          crashJRE();
+        }
+      };
+      t.setPriority(Thread.MAX_PRIORITY);
+      t.start();
+      // run the test until we crash.
+      for (int i = 0; i < 1000; i++) {
+        super.testNRTThreads();
+      }
+    }
+  }
+  
+  /** fork ourselves in a new jvm. sets -Dtests.crashmode=true */
+  public void forkTest() throws Exception {
+    List<String> cmd = new ArrayList<String>();
+    cmd.add(System.getProperty("java.home") 
+        + System.getProperty("file.separator")
+        + "bin"
+        + System.getProperty("file.separator")
+        + "java");
+    cmd.add("-Xmx512m");
+    cmd.add("-Dtests.crashmode=true");
+    // passing NIGHTLY to this test makes it run for much longer, easier to catch it in the act...
+    cmd.add("-Dtests.nightly=true");
+    cmd.add("-DtempDir=" + tempDir.getPath());
+    cmd.add("-Dtests.seed=" + random.nextLong() + ":" + random.nextLong());
+    cmd.add("-ea");
+    cmd.add("-cp");
+    cmd.add(System.getProperty("java.class.path"));
+    cmd.add("org.junit.runner.JUnitCore");
+    cmd.add(getClass().getName());
+    ProcessBuilder pb = new ProcessBuilder(cmd);
+    pb.directory(tempDir);
+    pb.redirectErrorStream(true);
+    Process p = pb.start();
+    InputStream is = p.getInputStream();
+    BufferedInputStream isl = new BufferedInputStream(is);
+    byte buffer[] = new byte[1024];
+    int len = 0;
+    if (VERBOSE) System.err.println(">>> Begin subprocess output");
+    while ((len = isl.read(buffer)) != -1) {
+      if (VERBOSE) {
+        System.err.write(buffer, 0, len);
+      }
+    }
+    if (VERBOSE) System.err.println("<<< End subprocess output");
+    p.waitFor();
+  }
+  
+  /**
+   * Recursively looks for indexes underneath <code>file</code>,
+   * and runs checkindex on them. returns true if it found any indexes.
+   */
+  public boolean checkIndexes(File file) throws IOException {
+    if (file.isDirectory()) {
+      MockDirectoryWrapper dir = newFSDirectory(file);
+      dir.setCheckIndexOnClose(false); // don't double-checkindex
+      if (IndexReader.indexExists(dir)) {
+        if (VERBOSE) {
+          System.err.println("Checking index: " + file);
+        }
+        _TestUtil.checkIndex(dir);
+        dir.close();
+        return true;
+      }
+      dir.close();
+      for (File f : file.listFiles())
+        if (checkIndexes(f))
+          return true;
+    }
+    return false;
+  }
+  
+  /**
+   * Currently, this only works on (and is tested with) Sun and IBM JREs.
+   */
+  public void crashJRE() {
+    try {
+      Class<?> clazz = Class.forName("sun.misc.Unsafe");
+      // we should use getUnsafe() instead; Harmony implements it, etc.
+      Field field = clazz.getDeclaredField("theUnsafe");
+      field.setAccessible(true);
+      Object o = field.get(null);
+      Method m = clazz.getMethod("putAddress", long.class, long.class);
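+      // writing to address 0 dereferences the null page natively, so the JVM dies hard (SIGSEGV)
+      // without running shutdown hooks, simulating a sudden crash or power loss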
+      m.invoke(o, 0L, 0L);
+    } catch (Exception e) { e.printStackTrace(); }
+    fail();
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestIndexWriterOptimize.java b/lucene/backwards/src/test/org/apache/lucene/index/TestIndexWriterOptimize.java
new file mode 100644
index 0000000..523e253
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestIndexWriterOptimize.java
@@ -0,0 +1,215 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.Field.Index;
+import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.document.Field.TermVector;
+import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.MockDirectoryWrapper;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util._TestUtil;
+
+public class TestIndexWriterOptimize extends LuceneTestCase {
+  public void testOptimizeMaxNumSegments() throws IOException {
+
+    MockDirectoryWrapper dir = newDirectory();
+
+    final Document doc = new Document();
+    doc.add(newField("content", "aaa", Field.Store.NO, Field.Index.NOT_ANALYZED_NO_NORMS));
+    final int incrMin = TEST_NIGHTLY ? 15 : 40;
+    for(int numDocs=10;numDocs<500;numDocs += _TestUtil.nextInt(random, incrMin, 5*incrMin)) {
+      LogDocMergePolicy ldmp = new LogDocMergePolicy();
+      ldmp.setMinMergeDocs(1);
+      ldmp.setMergeFactor(5);
+      IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
+        TEST_VERSION_CURRENT, new MockAnalyzer(random))
+        .setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(2).setMergePolicy(
+            ldmp));
+      for(int j=0;j<numDocs;j++)
+        writer.addDocument(doc);
+      writer.close();
+
+      SegmentInfos sis = new SegmentInfos();
+      sis.read(dir);
+      final int segCount = sis.size();
+
+      ldmp = new LogDocMergePolicy();
+      ldmp.setMergeFactor(5);
+      writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT,
+        new MockAnalyzer(random)).setMergePolicy(ldmp));
+      writer.optimize(3);
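+      // optimize down to at most 3 segments; if there were already fewer, the count must not change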
+      writer.close();
+
+      sis = new SegmentInfos();
+      sis.read(dir);
+      final int optSegCount = sis.size();
+
+      if (segCount < 3)
+        assertEquals(segCount, optSegCount);
+      else
+        assertEquals(3, optSegCount);
+    }
+    dir.close();
+  }
+
+  public void testOptimizeMaxNumSegments2() throws IOException {
+    MockDirectoryWrapper dir = newDirectory();
+
+    final Document doc = new Document();
+    doc.add(newField("content", "aaa", Field.Store.NO, Field.Index.NOT_ANALYZED_NO_NORMS));
+
+    LogDocMergePolicy ldmp = new LogDocMergePolicy();
+    ldmp.setMinMergeDocs(1);
+    ldmp.setMergeFactor(4);
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
+      TEST_VERSION_CURRENT, new MockAnalyzer(random))
+      .setMaxBufferedDocs(2).setMergePolicy(ldmp).setMergeScheduler(new ConcurrentMergeScheduler()));
+
+    for(int iter=0;iter<10;iter++) {
+      for(int i=0;i<19;i++)
+        writer.addDocument(doc);
+
+      writer.commit();
+      writer.waitForMerges();
+      writer.commit();
+
+      SegmentInfos sis = new SegmentInfos();
+      sis.read(dir);
+
+      final int segCount = sis.size();
+
+      writer.optimize(7);
+      writer.commit();
+      writer.waitForMerges();
+
+      sis = new SegmentInfos();
+      sis.read(dir);
+      final int optSegCount = sis.size();
+
+      if (segCount < 7)
+        assertEquals(segCount, optSegCount);
+      else
+        assertEquals(7, optSegCount);
+    }
+    writer.close();
+    dir.close();
+  }
+
+  /**
+   * Make sure optimize doesn't use more than 4X the
+   * starting index size as its temporary free space
+   * (see the assertion below).
+   */
+  public void testOptimizeTempSpaceUsage() throws IOException {
+
+    MockDirectoryWrapper dir = newDirectory();
+    IndexWriter writer  = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(10).setMergePolicy(newLogMergePolicy()));
+    if (VERBOSE) {
+      System.out.println("TEST: config1=" + writer.getConfig());
+    }
+
+    for(int j=0;j<500;j++) {
+      TestIndexWriter.addDocWithIndex(writer, j);
+    }
+    final int termIndexInterval = writer.getConfig().getTermIndexInterval();
+    // force one extra segment w/ different doc store so
+    // we see the doc stores get merged
+    writer.commit();
+    TestIndexWriter.addDocWithIndex(writer, 500);
+    writer.close();
+
+    if (VERBOSE) {
+      System.out.println("TEST: start disk usage");
+    }
+    long startDiskUsage = 0;
+    String[] files = dir.listAll();
+    for(int i=0;i<files.length;i++) {
+      startDiskUsage += dir.fileLength(files[i]);
+      if (VERBOSE) {
+        System.out.println(files[i] + ": " + dir.fileLength(files[i]));
+      }
+    }
+
+    dir.resetMaxUsedSizeInBytes();
+    dir.setTrackDiskUsage(true);
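+    // reset and start tracking peak disk usage so the optimize below can be measured against the starting size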
+
+    // Important to use the same term index interval, else a
+    // smaller one here could increase the disk usage and
+    // cause a false failure:
+    writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND).setTermIndexInterval(termIndexInterval).setMergePolicy(newLogMergePolicy()));
+    writer.setInfoStream(VERBOSE ? System.out : null);
+    writer.optimize();
+    writer.close();
+    long maxDiskUsage = dir.getMaxUsedSizeInBytes();
+    assertTrue("optimize used too much temporary space: starting usage was " + startDiskUsage + " bytes; max temp usage was " + maxDiskUsage + " but should have been " + (4*startDiskUsage) + " (= 4X starting usage)",
+               maxDiskUsage <= 4*startDiskUsage);
+    dir.close();
+  }
+  
+  // Test calling optimize(false), whereby optimize is kicked
+  // off but we don't wait for it to finish (but
+  // writer.close() does wait)
+  public void testBackgroundOptimize() throws IOException {
+
+    Directory dir = newDirectory();
+    for(int pass=0;pass<2;pass++) {
+      IndexWriter writer = new IndexWriter(
+          dir,
+          newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+              setOpenMode(OpenMode.CREATE).
+              setMaxBufferedDocs(2).
+              setMergePolicy(newLogMergePolicy(51))
+      );
+      Document doc = new Document();
+      doc.add(newField("field", "aaa", Store.NO, Index.NOT_ANALYZED));
+      for(int i=0;i<100;i++)
+        writer.addDocument(doc);
+      writer.optimize(false);
+
+      if (0 == pass) {
+        writer.close();
+        IndexReader reader = IndexReader.open(dir, true);
+        assertTrue(reader.isOptimized());
+        reader.close();
+      } else {
+        // Get another segment to flush so we can verify it is
+        // NOT included in the optimization
+        writer.addDocument(doc);
+        writer.addDocument(doc);
+        writer.close();
+
+        IndexReader reader = IndexReader.open(dir, true);
+        assertTrue(!reader.isOptimized());
+        reader.close();
+
+        SegmentInfos infos = new SegmentInfos();
+        infos.read(dir);
+        assertEquals(2, infos.size());
+      }
+    }
+
+    dir.close();
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestIndexWriterReader.java b/lucene/backwards/src/test/org/apache/lucene/index/TestIndexWriterReader.java
new file mode 100644
index 0000000..af32c94
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestIndexWriterReader.java
@@ -0,0 +1,1003 @@
+package org.apache.lucene.index;
+
+/**
+ * Copyright 2004 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.io.PrintStream;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Random;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.WhitespaceAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.Field.Index;
+import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.document.Field.TermVector;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.MockDirectoryWrapper;
+import org.apache.lucene.store.AlreadyClosedException;
+import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util._TestUtil;
+import org.apache.lucene.util.ThreadInterruptedException;
+import java.util.concurrent.atomic.AtomicInteger;
+
+public class TestIndexWriterReader extends LuceneTestCase {
+  static PrintStream infoStream = VERBOSE ? System.out : null;
+  
+  public static int count(Term t, IndexReader r) throws IOException {
+    int count = 0;
+    TermDocs td = r.termDocs(t);
+    while (td.next()) {
+      td.doc();
+      count++;
+    }
+    td.close();
+    return count;
+  }
+  
+  public void testAddCloseOpen() throws IOException {
+    Directory dir1 = newDirectory();
+    IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
+    
+    IndexWriter writer = new IndexWriter(dir1, iwc);
+    for (int i = 0; i < 97 ; i++) {
+      IndexReader reader = writer.getReader();
+      if (i == 0) {
+        writer.addDocument(DocHelper.createDocument(i, "x", 1 + random.nextInt(5)));
+      } else {
+        int previous = random.nextInt(i);
+        // a check whether the reader is current here could fail, since there might be
+        // merges going on.
+        switch (random.nextInt(5)) {
+        case 0:
+        case 1:
+        case 2:
+          writer.addDocument(DocHelper.createDocument(i, "x", 1 + random.nextInt(5)));
+          break;
+        case 3:
+          writer.updateDocument(new Term("id", "" + previous), DocHelper.createDocument(
+              previous, "x", 1 + random.nextInt(5)));
+          break;
+        case 4:
+          writer.deleteDocuments(new Term("id", "" + previous));
+        }
+      }
+      assertFalse(reader.isCurrent());
+      reader.close();
+    }
+    writer.optimize(); // make sure all merging is done etc.
+    IndexReader reader = writer.getReader();
+    writer.commit(); // no changes that are not visible to the reader
+    assertTrue(reader.isCurrent());
+    writer.close();
+    assertTrue(reader.isCurrent()); // all changes are visible to the reader
+    iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
+    writer = new IndexWriter(dir1, iwc);
+    assertTrue(reader.isCurrent());
+    writer.addDocument(DocHelper.createDocument(1, "x", 1+random.nextInt(5)));
+    assertTrue(reader.isCurrent()); // new segments are only in RAM, and this IW is a different instance from the reader's one
+    writer.close();
+    assertFalse(reader.isCurrent()); // segments written
+    reader.close();
+    dir1.close();
+  }
+  
+  public void testUpdateDocument() throws Exception {
+    boolean optimize = true;
+
+    Directory dir1 = newDirectory();
+    IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
+    if (iwc.getMaxBufferedDocs() < 20) {
+      iwc.setMaxBufferedDocs(20);
+    }
+    // no merging
+    if (random.nextBoolean()) {
+      iwc.setMergePolicy(NoMergePolicy.NO_COMPOUND_FILES);
+    } else {
+      iwc.setMergePolicy(NoMergePolicy.COMPOUND_FILES);
+    }
+    IndexWriter writer = new IndexWriter(dir1, iwc);
+
+    // create the index
+    createIndexNoClose(!optimize, "index1", writer);
+
+    // writer.flush(false, true, true);
+
+    // get a reader
+    IndexReader r1 = writer.getReader();
+    assertTrue(r1.isCurrent());
+
+    String id10 = r1.document(10).getField("id").stringValue();
+    
+    Document newDoc = r1.document(10);
+    newDoc.removeField("id");
+    newDoc.add(newField("id", Integer.toString(8000), Store.YES, Index.NOT_ANALYZED));
+    writer.updateDocument(new Term("id", id10), newDoc);
+    assertFalse(r1.isCurrent());
+
+    IndexReader r2 = writer.getReader();
+    assertTrue(r2.isCurrent());
+    assertEquals(0, count(new Term("id", id10), r2));
+    assertEquals(1, count(new Term("id", Integer.toString(8000)), r2));
+    
+    r1.close();
+    writer.close();
+    assertTrue(r2.isCurrent());
+    
+    IndexReader r3 = IndexReader.open(dir1, true);
+    assertTrue(r3.isCurrent());
+    assertTrue(r2.isCurrent());
+    assertEquals(0, count(new Term("id", id10), r3));
+    assertEquals(1, count(new Term("id", Integer.toString(8000)), r3));
+
+    writer = new IndexWriter(dir1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    Document doc = new Document();
+    doc.add(newField("field", "a b c", Field.Store.NO, Field.Index.ANALYZED));
+    writer.addDocument(doc);
+    assertTrue(r2.isCurrent());
+    assertTrue(r3.isCurrent());
+
+    writer.close();
+
+    assertFalse(r2.isCurrent());
+    assertTrue(!r3.isCurrent());
+
+    r2.close();
+    r3.close();
+    
+    dir1.close();
+  }
+  
+  public void testIsCurrent() throws IOException {
+    Directory dir = newDirectory();
+    IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
+    
+    IndexWriter writer = new IndexWriter(dir, iwc);
+    Document doc = new Document();
+    doc.add(newField("field", "a b c", Field.Store.NO, Field.Index.ANALYZED));
+    writer.addDocument(doc);
+    writer.close();
+    
+    iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
+    writer = new IndexWriter(dir, iwc);
+    doc = new Document();
+    doc.add(newField("field", "a b c", Field.Store.NO, Field.Index.ANALYZED));
+    IndexReader nrtReader = writer.getReader();
+    assertTrue(nrtReader.isCurrent());
+    writer.addDocument(doc);
+    assertFalse(nrtReader.isCurrent()); // should see the changes
+    writer.optimize(); // make sure we don't have a merge going on
+    assertFalse(nrtReader.isCurrent());
+    nrtReader.close();
+    
+    IndexReader dirReader = IndexReader.open(dir);
+    nrtReader = writer.getReader();
+    
+    assertTrue(dirReader.isCurrent());
+    assertTrue(nrtReader.isCurrent()); // nothing was committed yet so we are still current
+    assertEquals(2, nrtReader.maxDoc()); // sees the actual document added
+    assertEquals(1, dirReader.maxDoc());
+    writer.close(); // close is actually a commit; both should see the changes
+    assertTrue(nrtReader.isCurrent()); 
+    assertFalse(dirReader.isCurrent()); // this reader has been opened before the writer was closed / committed
+    
+    dirReader.close();
+    nrtReader.close();
+    dir.close();
+  }
+  
+  /**
+   * Test using IW.addIndexes
+   * 
+   * @throws Exception
+   */
+  public void testAddIndexes() throws Exception {
+    boolean optimize = false;
+
+    Directory dir1 = newDirectory();
+    IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
+    if (iwc.getMaxBufferedDocs() < 20) {
+      iwc.setMaxBufferedDocs(20);
+    }
+    // no merging
+    if (random.nextBoolean()) {
+      iwc.setMergePolicy(NoMergePolicy.NO_COMPOUND_FILES);
+    } else {
+      iwc.setMergePolicy(NoMergePolicy.COMPOUND_FILES);
+    }
+    IndexWriter writer = new IndexWriter(dir1, iwc);
+
+    writer.setInfoStream(infoStream);
+    // create the index
+    createIndexNoClose(!optimize, "index1", writer);
+    writer.flush(false, true);
+
+    // create a 2nd index
+    Directory dir2 = newDirectory();
+    IndexWriter writer2 = new IndexWriter(dir2, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    writer2.setInfoStream(infoStream);
+    createIndexNoClose(!optimize, "index2", writer2);
+    writer2.close();
+
+    IndexReader r0 = writer.getReader();
+    assertTrue(r0.isCurrent());
+    writer.addIndexes(new Directory[] { dir2 });
+    assertFalse(r0.isCurrent());
+    r0.close();
+
+    IndexReader r1 = writer.getReader();
+    assertTrue(r1.isCurrent());
+
+    writer.commit();
+    assertTrue(r1.isCurrent()); // we have seen all changes - no change after opening the NRT reader
+
+    assertEquals(200, r1.maxDoc());
+
+    int index2df = r1.docFreq(new Term("indexname", "index2"));
+
+    assertEquals(100, index2df);
+
+    // verify the docs are from different indexes
+    Document doc5 = r1.document(5);
+    assertEquals("index1", doc5.get("indexname"));
+    Document doc150 = r1.document(150);
+    assertEquals("index2", doc150.get("indexname"));
+    r1.close();
+    writer.close();
+    dir1.close();
+    dir2.close();
+  }
+  
+  public void testAddIndexes2() throws Exception {
+    boolean optimize = false;
+
+    Directory dir1 = newDirectory();
+    IndexWriter writer = new IndexWriter(dir1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    writer.setInfoStream(infoStream);
+
+    // create a 2nd index
+    Directory dir2 = newDirectory();
+    IndexWriter writer2 = new IndexWriter(dir2, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    writer2.setInfoStream(infoStream);
+    createIndexNoClose(!optimize, "index2", writer2);
+    writer2.close();
+
+    writer.addIndexes(new Directory[] { dir2 });
+    writer.addIndexes(new Directory[] { dir2 });
+    writer.addIndexes(new Directory[] { dir2 });
+    writer.addIndexes(new Directory[] { dir2 });
+    writer.addIndexes(new Directory[] { dir2 });
+
+    IndexReader r1 = writer.getReader();
+    assertEquals(500, r1.maxDoc());
+    
+    r1.close();
+    writer.close();
+    dir1.close();
+    dir2.close();
+  }
+
+  /**
+   * Deletes using IW.deleteDocuments
+   * 
+   * @throws Exception
+   */
+  public void testDeleteFromIndexWriter() throws Exception {
+    boolean optimize = true;
+
+    Directory dir1 = newDirectory();
+    IndexWriter writer = new IndexWriter(dir1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setReaderTermsIndexDivisor(2));
+    writer.setInfoStream(infoStream);
+    // create the index
+    createIndexNoClose(!optimize, "index1", writer);
+    writer.flush(false, true);
+    // get a reader
+    IndexReader r1 = writer.getReader();
+
+    String id10 = r1.document(10).getField("id").stringValue();
+
+    // deleted IW docs should not show up in the next getReader
+    writer.deleteDocuments(new Term("id", id10));
+    IndexReader r2 = writer.getReader();
+    assertEquals(1, count(new Term("id", id10), r1));
+    assertEquals(0, count(new Term("id", id10), r2));
+    
+    String id50 = r1.document(50).getField("id").stringValue();
+    assertEquals(1, count(new Term("id", id50), r1));
+    
+    writer.deleteDocuments(new Term("id", id50));
+    
+    IndexReader r3 = writer.getReader();
+    assertEquals(0, count(new Term("id", id10), r3));
+    assertEquals(0, count(new Term("id", id50), r3));
+    
+    String id75 = r1.document(75).getField("id").stringValue();
+    writer.deleteDocuments(new TermQuery(new Term("id", id75)));
+    IndexReader r4 = writer.getReader();
+    assertEquals(1, count(new Term("id", id75), r3));
+    assertEquals(0, count(new Term("id", id75), r4));
+    
+    r1.close();
+    r2.close();
+    r3.close();
+    r4.close();
+    writer.close();
+        
+    // reopen the writer to verify the delete made it to the directory
+    writer = new IndexWriter(dir1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    writer.setInfoStream(infoStream);
+    IndexReader w2r1 = writer.getReader();
+    assertEquals(0, count(new Term("id", id10), w2r1));
+    w2r1.close();
+    writer.close();
+    dir1.close();
+  }
+
+  public void testAddIndexesAndDoDeletesThreads() throws Throwable {
+    final int numIter = 2;
+    int numDirs = 3;
+    
+    Directory mainDir = newDirectory();
+    IndexWriter mainWriter = new IndexWriter(mainDir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
+    _TestUtil.reduceOpenFiles(mainWriter);
+
+    mainWriter.setInfoStream(infoStream);
+    AddDirectoriesThreads addDirThreads = new AddDirectoriesThreads(numIter, mainWriter);
+    addDirThreads.launchThreads(numDirs);
+    addDirThreads.joinThreads();
+    
+    //assertEquals(100 + numDirs * (3 * numIter / 4) * addDirThreads.NUM_THREADS
+    //    * addDirThreads.NUM_INIT_DOCS, addDirThreads.mainWriter.numDocs());
+    assertEquals(addDirThreads.count.intValue(), addDirThreads.mainWriter.numDocs());
+
+    addDirThreads.close(true);
+    
+    assertTrue(addDirThreads.failures.size() == 0);
+
+    _TestUtil.checkIndex(mainDir);
+
+    IndexReader reader = IndexReader.open(mainDir, true);
+    assertEquals(addDirThreads.count.intValue(), reader.numDocs());
+    //assertEquals(100 + numDirs * (3 * numIter / 4) * addDirThreads.NUM_THREADS
+    //    * addDirThreads.NUM_INIT_DOCS, reader.numDocs());
+    reader.close();
+
+    addDirThreads.closeDir();
+    mainDir.close();
+  }
+  
+  private class AddDirectoriesThreads {
+    Directory addDir;
+    final static int NUM_THREADS = 5;
+    final static int NUM_INIT_DOCS = 100;
+    int numDirs;
+    final Thread[] threads = new Thread[NUM_THREADS];
+    IndexWriter mainWriter;
+    final List<Throwable> failures = new ArrayList<Throwable>();
+    IndexReader[] readers;
+    boolean didClose = false;
+    AtomicInteger count = new AtomicInteger(0);
+    AtomicInteger numaddIndexes = new AtomicInteger(0);
+    
+    public AddDirectoriesThreads(int numDirs, IndexWriter mainWriter) throws Throwable {
+      this.numDirs = numDirs;
+      this.mainWriter = mainWriter;
+      addDir = newDirectory();
+      IndexWriter writer = new IndexWriter(addDir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(2));
+      for (int i = 0; i < NUM_INIT_DOCS; i++) {
+        Document doc = DocHelper.createDocument(i, "addindex", 4);
+        writer.addDocument(doc);
+      }
+        
+      writer.close();
+      
+      readers = new IndexReader[numDirs];
+      for (int i = 0; i < numDirs; i++)
+        readers[i] = IndexReader.open(addDir, false);
+    }
+    
+    void joinThreads() {
+      for (int i = 0; i < NUM_THREADS; i++)
+        try {
+          threads[i].join();
+        } catch (InterruptedException ie) {
+          throw new ThreadInterruptedException(ie);
+        }
+    }
+
+    void close(boolean doWait) throws Throwable {
+      didClose = true;
+      if (doWait) {
+        mainWriter.waitForMerges();
+      }
+      mainWriter.close(doWait);
+    }
+
+    void closeDir() throws Throwable {
+      for (int i = 0; i < numDirs; i++)
+        readers[i].close();
+      addDir.close();
+    }
+    
+    void handle(Throwable t) {
+      t.printStackTrace(System.out);
+      synchronized (failures) {
+        failures.add(t);
+      }
+    }
+    
+    void launchThreads(final int numIter) {
+      for (int i = 0; i < NUM_THREADS; i++) {
+        threads[i] = new Thread() {
+          @Override
+          public void run() {
+            try {
+              final Directory[] dirs = new Directory[numDirs];
+              for (int k = 0; k < numDirs; k++)
+                dirs[k] = new MockDirectoryWrapper(random, new RAMDirectory(addDir));
+              //int j = 0;
+              //while (true) {
+                // System.out.println(Thread.currentThread().getName() + ": iter
+                // j=" + j);
+                for (int x=0; x < numIter; x++) {
+                  // only do addIndexes
+                  doBody(x, dirs);
+                }
+                //if (numIter > 0 && j == numIter)
+                //  break;
+                //doBody(j++, dirs);
+                //doBody(5, dirs);
+              //}
+            } catch (Throwable t) {
+              handle(t);
+            }
+          }
+        };
+      }
+      for (int i = 0; i < NUM_THREADS; i++)
+        threads[i].start();
+    }
+    
+    void doBody(int j, Directory[] dirs) throws Throwable {
+      switch (j % 4) {
+        case 0:
+          mainWriter.addIndexes(dirs);
+          mainWriter.optimize();
+          break;
+        case 1:
+          mainWriter.addIndexes(dirs);
+          numaddIndexes.incrementAndGet();
+          break;
+        case 2:
+          mainWriter.addIndexes(readers);
+          break;
+        case 3:
+          mainWriter.commit();
+      }
+      count.addAndGet(dirs.length*NUM_INIT_DOCS);
+    }
+  }
+
+  public void testIndexWriterReopenSegmentOptimize() throws Exception {
+    doTestIndexWriterReopenSegment(true);
+  }
+
+  public void testIndexWriterReopenSegment() throws Exception {
+    doTestIndexWriterReopenSegment(false);
+  }
+
+  /**
+   * Tests creating a segment, then checks to ensure the segment can be seen via
+   * IW.getReader
+   */
+  public void doTestIndexWriterReopenSegment(boolean optimize) throws Exception {
+    Directory dir1 = newDirectory();
+    IndexWriter writer = new IndexWriter(dir1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    writer.setInfoStream(infoStream);
+    IndexReader r1 = writer.getReader();
+    assertEquals(0, r1.maxDoc());
+    createIndexNoClose(false, "index1", writer);
+    writer.flush(!optimize, true);
+
+    IndexReader iwr1 = writer.getReader();
+    assertEquals(100, iwr1.maxDoc());
+
+    IndexReader r2 = writer.getReader();
+    assertEquals(r2.maxDoc(), 100);
+    // add 100 documents
+    for (int x = 10000; x < 10000 + 100; x++) {
+      Document d = DocHelper.createDocument(x, "index1", 5);
+      writer.addDocument(d);
+    }
+    writer.flush(false, true);
+    // verify the reader was reopened internally
+    IndexReader iwr2 = writer.getReader();
+    assertTrue(iwr2 != r1);
+    assertEquals(200, iwr2.maxDoc());
+    // should have flushed out a segment
+    IndexReader r3 = writer.getReader();
+    assertTrue(r2 != r3);
+    assertEquals(200, r3.maxDoc());
+
+    // dec ref the readers rather than close them because
+    // closing flushes changes to the writer
+    r1.close();
+    iwr1.close();
+    r2.close();
+    r3.close();
+    iwr2.close();
+    writer.close();
+
+    // test whether the changes made it to the directory
+    writer = new IndexWriter(dir1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    IndexReader w2r1 = writer.getReader();
+    // ensure the changes were actually flushed to the directory
+    assertEquals(200, w2r1.maxDoc());
+    w2r1.close();
+    writer.close();
+
+    dir1.close();
+  }
+ 
+  /*
+   * Delete a document by term and return the doc id
+   * 
+   * public static int deleteDocument(Term term, IndexWriter writer) throws
+   * IOException { IndexReader reader = writer.getReader(); TermDocs td =
+   * reader.termDocs(term); int doc = -1; //if (td.next()) { // doc = td.doc();
+   * //} //writer.deleteDocuments(term); td.close(); return doc; }
+   */
+  
+  public static void createIndex(Random random, Directory dir1, String indexName,
+      boolean multiSegment) throws IOException {
+    IndexWriter w = new IndexWriter(dir1, LuceneTestCase.newIndexWriterConfig(random,
+        TEST_VERSION_CURRENT, new MockAnalyzer(random))
+        .setMergePolicy(new LogDocMergePolicy()));
+    for (int i = 0; i < 100; i++) {
+      w.addDocument(DocHelper.createDocument(i, indexName, 4));
+      if (multiSegment && (i % 10) == 0) {
+      }
+    }
+    if (!multiSegment) {
+      w.optimize();
+    }
+    w.close();
+  }
+
+  public static void createIndexNoClose(boolean multiSegment, String indexName,
+      IndexWriter w) throws IOException {
+    for (int i = 0; i < 100; i++) {
+      w.addDocument(DocHelper.createDocument(i, indexName, 4));
+    }
+    if (!multiSegment) {
+      w.optimize();
+    }
+  }
+
+  private static class MyWarmer extends IndexWriter.IndexReaderWarmer {
+    int warmCount;
+    @Override
+    public void warm(IndexReader reader) throws IOException {
+      warmCount++;
+    }
+  }
+
+  public void testMergeWarmer() throws Exception {
+
+    Directory dir1 = newDirectory();
+    // Enroll warmer
+    MyWarmer warmer = new MyWarmer();
+    IndexWriter writer = new IndexWriter(
+        dir1,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+            setMaxBufferedDocs(2).
+            setMergedSegmentWarmer(warmer).
+            setMergeScheduler(new ConcurrentMergeScheduler()).
+            setMergePolicy(newLogMergePolicy())
+    );
+    writer.setInfoStream(infoStream);
+
+    // create the index
+    createIndexNoClose(false, "test", writer);
+
+    // get a reader to put writer into near real-time mode
+    IndexReader r1 = writer.getReader();
+    
+    ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(2);
+
+    int num = atLeast(100);
+    for (int i = 0; i < num; i++) {
+      writer.addDocument(DocHelper.createDocument(i, "test", 4));
+    }
+    ((ConcurrentMergeScheduler) writer.getConfig().getMergeScheduler()).sync();
+
+    assertTrue(warmer.warmCount > 0);
+    final int count = warmer.warmCount;
+
+    writer.addDocument(DocHelper.createDocument(17, "test", 4));
+    writer.optimize();
+    assertTrue(warmer.warmCount > count);
+    
+    writer.close();
+    r1.close();
+    dir1.close();
+  }
+
+  public void testAfterCommit() throws Exception {
+    Directory dir1 = newDirectory();
+    IndexWriter writer = new IndexWriter(dir1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergeScheduler(new ConcurrentMergeScheduler()));
+    writer.commit();
+    writer.setInfoStream(infoStream);
+
+    // create the index
+    createIndexNoClose(false, "test", writer);
+
+    // get a reader to put writer into near real-time mode
+    IndexReader r1 = writer.getReader();
+    _TestUtil.checkIndex(dir1);
+    writer.commit();
+    _TestUtil.checkIndex(dir1);
+    assertEquals(100, r1.numDocs());
+
+    for (int i = 0; i < 10; i++) {
+      writer.addDocument(DocHelper.createDocument(i, "test", 4));
+    }
+    ((ConcurrentMergeScheduler) writer.getConfig().getMergeScheduler()).sync();
+
+    IndexReader r2 = r1.reopen();
+    if (r2 != r1) {
+      r1.close();
+      r1 = r2;
+    }
+    assertEquals(110, r1.numDocs());
+    writer.close();
+    r1.close();
+    dir1.close();
+  }
+
+  // Make sure reader remains usable even if IndexWriter closes
+  public void testAfterClose() throws Exception {
+    Directory dir1 = newDirectory();
+    IndexWriter writer = new IndexWriter(dir1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    writer.setInfoStream(infoStream);
+
+    // create the index
+    createIndexNoClose(false, "test", writer);
+
+    IndexReader r = writer.getReader();
+    writer.close();
+
+    _TestUtil.checkIndex(dir1);
+
+    // reader should remain usable even after IndexWriter is closed:
+    assertEquals(100, r.numDocs());
+    Query q = new TermQuery(new Term("indexname", "test"));
+    IndexSearcher searcher = newSearcher(r);
+    assertEquals(100, searcher.search(q, 10).totalHits);
+    searcher.close();
+    try {
+      r.reopen();
+      fail("failed to hit AlreadyClosedException");
+    } catch (AlreadyClosedException ace) {
+      // expected
+    }
+    r.close();
+    dir1.close();
+  }
+
+  // Stress test reopen during addIndexes
+  public void testDuringAddIndexes() throws Exception {
+    MockDirectoryWrapper dir1 = newDirectory();
+    final IndexWriter writer = new IndexWriter(
+        dir1,
+        newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+            setMergePolicy(newLogMergePolicy(2))
+    );
+    writer.setInfoStream(infoStream);
+    ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(2);
+
+    // create the index
+    createIndexNoClose(false, "test", writer);
+    writer.commit();
+
+    final Directory[] dirs = new Directory[10];
+    for (int i=0;i<10;i++) {
+      dirs[i] = new MockDirectoryWrapper(random, new RAMDirectory(dir1));
+    }
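+    // each entry is an in-memory copy of the 100-doc index, so every addIndexes(dirs) call below adds 1,000 docs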
+
+    IndexReader r = writer.getReader();
+
+    final int NUM_THREAD = 5;
+    final float SECONDS = 0.5f;
+
+    final long endTime = (long) (System.currentTimeMillis() + 1000.*SECONDS);
+    final List<Throwable> excs = Collections.synchronizedList(new ArrayList<Throwable>());
+
+    final Thread[] threads = new Thread[NUM_THREAD];
+    for(int i=0;i<NUM_THREAD;i++) {
+      threads[i] = new Thread() {
+          @Override
+          public void run() {
+            do {
+              try {
+                writer.addIndexes(dirs);
+                writer.maybeMerge();
+              } catch (Throwable t) {
+                excs.add(t);
+                throw new RuntimeException(t);
+              }
+            } while(System.currentTimeMillis() < endTime);
+          }
+        };
+      threads[i].setDaemon(true);
+      threads[i].start();
+    }
+
+    int lastCount = 0;
+    while(System.currentTimeMillis() < endTime) {
+      IndexReader r2 = r.reopen();
+      if (r2 != r) {
+        r.close();
+        r = r2;
+      }
+      Query q = new TermQuery(new Term("indexname", "test"));
+      IndexSearcher searcher = newSearcher(r);
+      final int count = searcher.search(q, 10).totalHits;
+      searcher.close();
+      assertTrue(count >= lastCount);
+      lastCount = count;
+    }
+
+    for(int i=0;i<NUM_THREAD;i++) {
+      threads[i].join();
+    }
+    // final check
+    IndexReader r2 = r.reopen();
+    if (r2 != r) {
+      r.close();
+      r = r2;
+    }
+    Query q = new TermQuery(new Term("indexname", "test"));
+    IndexSearcher searcher = newSearcher(r);
+    final int count = searcher.search(q, 10).totalHits;
+    searcher.close();
+    assertTrue(count >= lastCount);
+
+    assertEquals(0, excs.size());
+    r.close();
+    assertEquals(0, dir1.getOpenDeletedFiles().size());
+
+    writer.close();
+
+    dir1.close();
+  }
+
+  // Stress test reopen during add/delete
+  public void testDuringAddDelete() throws Exception {
+    Directory dir1 = newDirectory();
+    final IndexWriter writer = new IndexWriter(
+        dir1,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+            setMergePolicy(newLogMergePolicy(2))
+    );
+    writer.setInfoStream(infoStream);
+    ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(2);
+
+    // create the index
+    createIndexNoClose(false, "test", writer);
+    writer.commit();
+
+    IndexReader r = writer.getReader();
+
+    final int NUM_THREAD = 5;
+    final float SECONDS = 0.5f;
+
+    final long endTime = (long) (System.currentTimeMillis() + 1000.*SECONDS);
+    final List<Throwable> excs = Collections.synchronizedList(new ArrayList<Throwable>());
+
+    final Thread[] threads = new Thread[NUM_THREAD];
+    for(int i=0;i<NUM_THREAD;i++) {
+      threads[i] = new Thread() {
+          final Random r = new Random(random.nextLong());
+
+          @Override
+          public void run() {
+            int count = 0;
+            do {
+              try {
+                for(int docUpto=0;docUpto<10;docUpto++) {
+                  writer.addDocument(DocHelper.createDocument(10*count+docUpto, "test", 4));
+                }
+                count++;
+                final int limit = count*10;
+                for(int delUpto=0;delUpto<5;delUpto++) {
+                  int x = r.nextInt(limit);
+                  writer.deleteDocuments(new Term("field3", "b"+x));
+                }
+              } catch (Throwable t) {
+                excs.add(t);
+                throw new RuntimeException(t);
+              }
+            } while(System.currentTimeMillis() < endTime);
+          }
+        };
+      threads[i].setDaemon(true);
+      threads[i].start();
+    }
+
+    int sum = 0;
+    while(System.currentTimeMillis() < endTime) {
+      IndexReader r2 = r.reopen();
+      if (r2 != r) {
+        r.close();
+        r = r2;
+      }
+      Query q = new TermQuery(new Term("indexname", "test"));
+      IndexSearcher searcher = newSearcher(r);
+      sum += searcher.search(q, 10).totalHits;
+      searcher.close();
+    }
+
+    for(int i=0;i<NUM_THREAD;i++) {
+      threads[i].join();
+    }
+    // at least search once
+    IndexReader r2 = r.reopen();
+    if (r2 != r) {
+      r.close();
+      r = r2;
+    }
+    Query q = new TermQuery(new Term("indexname", "test"));
+    IndexSearcher searcher = newSearcher(r);
+    sum += searcher.search(q, 10).totalHits;
+    searcher.close();
+    assertTrue("no documents found at all", sum > 0);
+
+    assertEquals(0, excs.size());
+    writer.close();
+
+    r.close();
+    dir1.close();
+  }
+
+  public void testExpungeDeletes() throws Throwable {
+    Directory dir = newDirectory();
+    final IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
+    Document doc = new Document();
+    doc.add(newField("field", "a b c", Field.Store.NO, Field.Index.ANALYZED));
+    Field id = newField("id", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
+    doc.add(id);
+    id.setValue("0");
+    w.addDocument(doc);
+    id.setValue("1");
+    w.addDocument(doc);
+    w.deleteDocuments(new Term("id", "0"));
+
+    IndexReader r = w.getReader();
+    w.expungeDeletes();
+    w.close();
+    r.close();
+    r = IndexReader.open(dir, true);
+    assertEquals(1, r.numDocs());
+    assertFalse(r.hasDeletions());
+    r.close();
+    dir.close();
+  }
+
+  public void testDeletesNumDocs() throws Throwable {
+    Directory dir = newDirectory();
+    final IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    Document doc = new Document();
+    doc.add(newField("field", "a b c", Field.Store.NO, Field.Index.ANALYZED));
+    Field id = newField("id", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
+    doc.add(id);
+    id.setValue("0");
+    w.addDocument(doc);
+    id.setValue("1");
+    w.addDocument(doc);
+    IndexReader r = w.getReader();
+    assertEquals(2, r.numDocs());
+    r.close();
+
+    w.deleteDocuments(new Term("id", "0"));
+    r = w.getReader();
+    assertEquals(1, r.numDocs());
+    r.close();
+
+    w.deleteDocuments(new Term("id", "1"));
+    r = w.getReader();
+    assertEquals(0, r.numDocs());
+    r.close();
+
+    w.close();
+    dir.close();
+  }
+  
+  public void testEmptyIndex() throws Exception {
+    // Ensures that getReader works on an empty index, which hasn't been committed yet.
+    Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    IndexReader r = w.getReader();
+    assertEquals(0, r.numDocs());
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testSegmentWarmer() throws Exception {
+    Directory dir = newDirectory();
+    final AtomicBoolean didWarm = new AtomicBoolean();
+    IndexWriter w = new IndexWriter(
+        dir,
+        newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+            setMaxBufferedDocs(2).
+            setReaderPooling(true).
+            setMergedSegmentWarmer(new IndexWriter.IndexReaderWarmer() {
+              @Override
+              public void warm(IndexReader r) throws IOException {
+                IndexSearcher s = newSearcher(r);
+                TopDocs hits = s.search(new TermQuery(new Term("foo", "bar")), 10);
+                assertEquals(20, hits.totalHits);
+                didWarm.set(true);
+                s.close();
+              }
+            }).
+            setMergePolicy(newLogMergePolicy(10))
+    );
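+    // with maxBufferedDocs=2 and mergeFactor=10, the 20 identical docs below produce 10 small
+    // segments that get merged into one; the warmer then sees all 20 "foo:bar" hits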
+
+    Document doc = new Document();
+    doc.add(newField("foo", "bar", Field.Store.YES, Field.Index.NOT_ANALYZED));
+    for(int i=0;i<20;i++) {
+      w.addDocument(doc);
+    }
+    w.waitForMerges();
+    w.close();
+    dir.close();
+    assertTrue(didWarm.get());
+  }
+  
+  public void testNoTermsIndex() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(
+        TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT))
+        .setReaderTermsIndexDivisor(-1));
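+    // a divisor of -1 means the terms index is never loaded, so any term seek should fail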
+    Document doc = new Document();
+    doc.add(new Field("f", "val", Store.NO, Index.ANALYZED));
+    w.addDocument(doc);
+    IndexReader r = IndexReader.open(w, true);
+    try {
+      r.termDocs(new Term("f", "val"));
+      fail("should have failed to seek since terms index was not loaded");
+    } catch (IllegalStateException e) {
+      // expected - we didn't load the term index
+    } finally {
+      r.close();
+      w.close();
+      dir.close();
+    }
+  }
+  
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestIndexWriterUnicode.java b/lucene/backwards/src/test/org/apache/lucene/index/TestIndexWriterUnicode.java
new file mode 100644
index 0000000..f6df136
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestIndexWriterUnicode.java
@@ -0,0 +1,288 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.Random;
+import java.util.Set;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.UnicodeUtil;
+
+public class TestIndexWriterUnicode extends LuceneTestCase {
+
+  final String[] utf8Data = new String[] {
+    // unpaired low surrogate
+    "ab\udc17cd", "ab\ufffdcd",
+    "\udc17abcd", "\ufffdabcd",
+    "\udc17", "\ufffd",
+    "ab\udc17\udc17cd", "ab\ufffd\ufffdcd",
+    "\udc17\udc17abcd", "\ufffd\ufffdabcd",
+    "\udc17\udc17", "\ufffd\ufffd",
+
+    // unpaired high surrogate
+    "ab\ud917cd", "ab\ufffdcd",
+    "\ud917abcd", "\ufffdabcd",
+    "\ud917", "\ufffd",
+    "ab\ud917\ud917cd", "ab\ufffd\ufffdcd",
+    "\ud917\ud917abcd", "\ufffd\ufffdabcd",
+    "\ud917\ud917", "\ufffd\ufffd",
+
+    // backwards surrogates
+    "ab\udc17\ud917cd", "ab\ufffd\ufffdcd",
+    "\udc17\ud917abcd", "\ufffd\ufffdabcd",
+    "\udc17\ud917", "\ufffd\ufffd",
+    "ab\udc17\ud917\udc17\ud917cd", "ab\ufffd\ud917\udc17\ufffdcd",
+    "\udc17\ud917\udc17\ud917abcd", "\ufffd\ud917\udc17\ufffdabcd",
+    "\udc17\ud917\udc17\ud917", "\ufffd\ud917\udc17\ufffd"
+  };
+  
+  private int nextInt(int lim) {
+    return random.nextInt(lim);
+  }
+
+  private int nextInt(int start, int end) {
+    return start + nextInt(end-start);
+  }
+  
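+  // Fills buffer[offset..offset+count) with random chars: ASCII, other BMP chars, valid surrogate
+  // pairs, and occasionally an illegal unpaired surrogate; expected gets the same chars with
+  // illegal surrogates replaced by U+FFFD. Returns true if any illegal sequence was produced.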
+  private boolean fillUnicode(char[] buffer, char[] expected, int offset, int count) {
+    final int len = offset + count;
+    boolean hasIllegal = false;
+
+    if (offset > 0 && buffer[offset] >= 0xdc00 && buffer[offset] < 0xe000)
+      // Don't start in the middle of a valid surrogate pair
+      offset--;
+
+    for(int i=offset;i<len;i++) {
+      int t = nextInt(6);
+      if (0 == t && i < len-1) {
+        // Make a surrogate pair
+        // High surrogate
+        expected[i] = buffer[i++] = (char) nextInt(0xd800, 0xdc00);
+        // Low surrogate
+        expected[i] = buffer[i] = (char) nextInt(0xdc00, 0xe000);
+      } else if (t <= 1)
+        expected[i] = buffer[i] = (char) nextInt(0x80);
+      else if (2 == t)
+        expected[i] = buffer[i] = (char) nextInt(0x80, 0x800);
+      else if (3 == t)
+        expected[i] = buffer[i] = (char) nextInt(0x800, 0xd800);
+      else if (4 == t)
+        expected[i] = buffer[i] = (char) nextInt(0xe000, 0xffff);
+      else if (5 == t && i < len-1) {
+        // Illegal unpaired surrogate
+        if (nextInt(10) == 7) {
+          if (random.nextBoolean())
+            buffer[i] = (char) nextInt(0xd800, 0xdc00);
+          else
+            buffer[i] = (char) nextInt(0xdc00, 0xe000);
+          expected[i++] = 0xfffd;
+          expected[i] = buffer[i] = (char) nextInt(0x800, 0xd800);
+          hasIllegal = true;
+        } else
+          expected[i] = buffer[i] = (char) nextInt(0x800, 0xd800);
+      } else {
+        expected[i] = buffer[i] = ' ';
+      }
+    }
+
+    return hasIllegal;
+  }
+  
+  // both start & end are inclusive
+  private final int getInt(Random r, int start, int end) {
+    return start + r.nextInt(1+end-start);
+  }
+
+  private final String asUnicodeChar(char c) {
+    return "U+" + Integer.toHexString(c);
+  }
+
+  private final String termDesc(String s) {
+    final String s0;
+    assertTrue(s.length() <= 2);
+    if (s.length() == 1) {
+      s0 = asUnicodeChar(s.charAt(0));
+    } else {
+      s0 = asUnicodeChar(s.charAt(0)) + "," + asUnicodeChar(s.charAt(1));
+    }
+    return s0;
+  }
+
+  // LUCENE-510
+  public void testRandomUnicodeStrings() throws Throwable {
+    char[] buffer = new char[20];
+    char[] expected = new char[20];
+
+    UnicodeUtil.UTF8Result utf8 = new UnicodeUtil.UTF8Result();
+    UnicodeUtil.UTF16Result utf16 = new UnicodeUtil.UTF16Result();
+
+    int num = atLeast(100000);
+    for (int iter = 0; iter < num; iter++) {
+      boolean hasIllegal = fillUnicode(buffer, expected, 0, 20);
+
+      UnicodeUtil.UTF16toUTF8(buffer, 0, 20, utf8);
+      if (!hasIllegal) {
+        byte[] b = new String(buffer, 0, 20).getBytes("UTF-8");
+        assertEquals(b.length, utf8.length);
+        for(int i=0;i<b.length;i++)
+          assertEquals(b[i], utf8.result[i]);
+      }
+
+      UnicodeUtil.UTF8toUTF16(utf8.result, 0, utf8.length, utf16);
+      assertEquals(utf16.length, 20);
+      for(int i=0;i<20;i++)
+        assertEquals(expected[i], utf16.result[i]);
+    }
+  }
+
+  // LUCENE-510
+  public void testAllUnicodeChars() throws Throwable {
+
+    UnicodeUtil.UTF8Result utf8 = new UnicodeUtil.UTF8Result();
+    UnicodeUtil.UTF16Result utf16 = new UnicodeUtil.UTF16Result();
+    char[] chars = new char[2];
+    for(int ch=0;ch<0x0010FFFF;ch++) {
+
+      if (ch == 0xd800)
+        // Skip invalid code points
+        ch = 0xe000;
+
+      int len = 0;
+      if (ch <= 0xffff) {
+        chars[len++] = (char) ch;
+      } else {
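+        // encode the supplementary code point as a UTF-16 surrogate pair (high surrogate, then low)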
+        chars[len++] = (char) (((ch-0x0010000) >> 10) + UnicodeUtil.UNI_SUR_HIGH_START);
+        chars[len++] = (char) (((ch-0x0010000) & 0x3FFL) + UnicodeUtil.UNI_SUR_LOW_START);
+      }
+
+      UnicodeUtil.UTF16toUTF8(chars, 0, len, utf8);
+      
+      String s1 = new String(chars, 0, len);
+      String s2 = new String(utf8.result, 0, utf8.length, "UTF-8");
+      assertEquals("codepoint " + ch, s1, s2);
+
+      UnicodeUtil.UTF8toUTF16(utf8.result, 0, utf8.length, utf16);
+      assertEquals("codepoint " + ch, s1, new String(utf16.result, 0, utf16.length));
+
+      byte[] b = s1.getBytes("UTF-8");
+      assertEquals(utf8.length, b.length);
+      for(int j=0;j<utf8.length;j++)
+        assertEquals(utf8.result[j], b[j]);
+    }
+  }
+  
+  public void testEmbeddedFFFF() throws Throwable {
+
+    Directory d = newDirectory();
+    IndexWriter w = new IndexWriter(d, newIndexWriterConfig( TEST_VERSION_CURRENT, new TestIndexWriter.StringSplitAnalyzer()));
+    Document doc = new Document();
+    doc.add(newField("field", "a a\uffffb", Field.Store.NO, Field.Index.ANALYZED));
+    w.addDocument(doc);
+    doc = new Document();
+    doc.add(newField("field", "a", Field.Store.NO, Field.Index.ANALYZED));
+    w.addDocument(doc);
+    w.close();
+
+    d.close();
+  }
+
+  // LUCENE-510
+  public void testInvalidUTF16() throws Throwable {
+    Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new TestIndexWriter.StringSplitAnalyzer()));
+    Document doc = new Document();
+
+    final int count = utf8Data.length/2;
+    for(int i=0;i<count;i++)
+      doc.add(newField("f" + i, utf8Data[2*i], Field.Store.YES, Field.Index.ANALYZED));
+    w.addDocument(doc);
+    w.close();
+
+    IndexReader ir = IndexReader.open(dir, true);
+    Document doc2 = ir.document(0);
+    for(int i=0;i<count;i++) {
+      assertEquals("field " + i + " was not indexed correctly", 1, ir.docFreq(new Term("f"+i, utf8Data[2*i+1])));
+      assertEquals("field " + i + " is incorrect", utf8Data[2*i+1], doc2.getField("f"+i).stringValue());
+    }
+    ir.close();
+    dir.close();
+  }
+  
+  // LUCENE-510
+  public void testIncrementalUnicodeStrings() throws Throwable {
+    char[] buffer = new char[20];
+    char[] expected = new char[20];
+
+    UnicodeUtil.UTF8Result utf8 = new UnicodeUtil.UTF8Result();
+    UnicodeUtil.UTF16Result utf16 = new UnicodeUtil.UTF16Result();
+    UnicodeUtil.UTF16Result utf16a = new UnicodeUtil.UTF16Result();
+
+    boolean hasIllegal = false;
+    byte[] last = new byte[60];
+
+    int num = atLeast(100000);
+    for (int iter = 0; iter < num; iter++) {
+
+      final int prefix;
+
+      if (iter == 0 || hasIllegal)
+        prefix = 0;
+      else
+        prefix = nextInt(20);
+
+      hasIllegal = fillUnicode(buffer, expected, prefix, 20-prefix);
+
+      UnicodeUtil.UTF16toUTF8(buffer, 0, 20, utf8);
+      if (!hasIllegal) {
+        byte[] b = new String(buffer, 0, 20).getBytes("UTF-8");
+        assertEquals(b.length, utf8.length);
+        for(int i=0;i<b.length;i++)
+          assertEquals(b[i], utf8.result[i]);
+      }
+
+      int bytePrefix = 20;
+      if (iter == 0 || hasIllegal)
+        bytePrefix = 0;
+      else
+        for(int i=0;i<20;i++)
+          if (last[i] != utf8.result[i]) {
+            bytePrefix = i;
+            break;
+          }
+      System.arraycopy(utf8.result, 0, last, 0, utf8.length);
+
+      UnicodeUtil.UTF8toUTF16(utf8.result, bytePrefix, utf8.length-bytePrefix, utf16);
+      assertEquals(20, utf16.length);
+      for(int i=0;i<20;i++)
+        assertEquals(expected[i], utf16.result[i]);
+
+      UnicodeUtil.UTF8toUTF16(utf8.result, 0, utf8.length, utf16a);
+      assertEquals(20, utf16a.length);
+      for(int i=0;i<20;i++)
+        assertEquals(expected[i], utf16a.result[i]);
+    }
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestIndexWriterWithThreads.java b/lucene/backwards/src/test/org/apache/lucene/index/TestIndexWriterWithThreads.java
new file mode 100644
index 0000000..885d0b9
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestIndexWriterWithThreads.java
@@ -0,0 +1,472 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.concurrent.CountDownLatch;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.store.AlreadyClosedException;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.MockDirectoryWrapper;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.ThreadInterruptedException;
+
+/**
+ * MultiThreaded IndexWriter tests
+ */
+public class TestIndexWriterWithThreads extends LuceneTestCase {
+
+  // Used by test cases below
+  private class IndexerThread extends Thread {
+
+    boolean diskFull;
+    Throwable error;
+    AlreadyClosedException ace;
+    IndexWriter writer;
+    boolean noErrors;
+    volatile int addCount;
+
+    public IndexerThread(IndexWriter writer, boolean noErrors) {
+      this.writer = writer;
+      this.noErrors = noErrors;
+    }
+
+    @Override
+    public void run() {
+
+      final Document doc = new Document();
+      doc.add(newField("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+
+      int idUpto = 0;
+      int fullCount = 0;
+      final long stopTime = System.currentTimeMillis() + 200;
+
+      do {
+        try {
+          writer.updateDocument(new Term("id", ""+(idUpto++)), doc);
+          addCount++;
+        } catch (IOException ioe) {
+          if (VERBOSE) {
+            System.out.println("TEST: expected exc:");
+            ioe.printStackTrace(System.out);
+          }
+          //System.out.println(Thread.currentThread().getName() + ": hit exc");
+          //ioe.printStackTrace(System.out);
+          if (ioe.getMessage().startsWith("fake disk full at") ||
+              ioe.getMessage().equals("now failing on purpose")) {
+            diskFull = true;
+            try {
+              Thread.sleep(1);
+            } catch (InterruptedException ie) {
+              throw new ThreadInterruptedException(ie);
+            }
+            if (fullCount++ >= 5)
+              break;
+          } else {
+            if (noErrors) {
+              System.out.println(Thread.currentThread().getName() + ": ERROR: unexpected IOException:");
+              ioe.printStackTrace(System.out);
+              error = ioe;
+            }
+            break;
+          }
+        } catch (Throwable t) {
+          //t.printStackTrace(System.out);
+          if (noErrors) {
+            System.out.println(Thread.currentThread().getName() + ": ERROR: unexpected Throwable:");
+            t.printStackTrace(System.out);
+            error = t;
+          }
+          break;
+        }
+      } while(System.currentTimeMillis() < stopTime);
+    }
+  }
+
+  // LUCENE-1130: make sure immediate disk full on creating
+  // an IndexWriter (hit during DW.ThreadState.init()), with
+  // multiple threads, is OK:
+  public void testImmediateDiskFullWithThreads() throws Exception {
+
+    int NUM_THREADS = 3;
+
+    for(int iter=0;iter<10;iter++) {
+      if (VERBOSE) {
+        System.out.println("\nTEST: iter=" + iter);
+      }
+      MockDirectoryWrapper dir = newDirectory();
+      IndexWriter writer = new IndexWriter(
+          dir,
+          newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+              setMaxBufferedDocs(2).
+              setMergeScheduler(new ConcurrentMergeScheduler()).
+              setMergePolicy(newLogMergePolicy(4))
+      );
+      ((ConcurrentMergeScheduler) writer.getConfig().getMergeScheduler()).setSuppressExceptions();
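+      // Cap the directory size so writes soon fail; IndexerThread treats the resulting
+      // "fake disk full" IOExceptions as expected.  The cap grows slightly each
+      // iteration so the failure strikes at a different point.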
+      dir.setMaxSizeInBytes(4*1024+20*iter);
+      writer.setInfoStream(VERBOSE ? System.out : null);
+
+      IndexerThread[] threads = new IndexerThread[NUM_THREADS];
+
+      for(int i=0;i<NUM_THREADS;i++)
+        threads[i] = new IndexerThread(writer, true);
+
+      for(int i=0;i<NUM_THREADS;i++)
+        threads[i].start();
+
+      for(int i=0;i<NUM_THREADS;i++) {
+        // Without fix for LUCENE-1130: one of the
+        // threads will hang
+        threads[i].join();
+        assertTrue("hit unexpected Throwable", threads[i].error == null);
+      }
+
+      // Make sure once disk space is avail again, we can
+      // cleanly close:
+      dir.setMaxSizeInBytes(0);
+      writer.close(false);
+      dir.close();
+    }
+  }
+  
+
+  // LUCENE-1130: make sure we can close() even while
+  // threads are trying to add documents.  Strictly
+  // speaking, this isn't valid use of Lucene's APIs, but we
+  // still want to be robust to this case:
+  public void testCloseWithThreads() throws Exception {
+    int NUM_THREADS = 3;
+
+    for(int iter=0;iter<7;iter++) {
+      Directory dir = newDirectory();
+      IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
+        .setMaxBufferedDocs(10).setMergeScheduler(new ConcurrentMergeScheduler()).setMergePolicy(newLogMergePolicy(4));
+      // We expect AlreadyClosedException
+      ((ConcurrentMergeScheduler) conf.getMergeScheduler()).setSuppressExceptions();
+      IndexWriter writer = new IndexWriter(dir, conf);
+
+      IndexerThread[] threads = new IndexerThread[NUM_THREADS];
+
+      for(int i=0;i<NUM_THREADS;i++)
+        threads[i] = new IndexerThread(writer, false);
+
+      for(int i=0;i<NUM_THREADS;i++)
+        threads[i].start();
+
+      boolean done = false;
+      while(!done) {
+        Thread.sleep(100);
+        for(int i=0;i<NUM_THREADS;i++)
+          // only stop when at least one thread has added a doc
+          if (threads[i].addCount > 0) {
+            done = true;
+            break;
+          } else if (!threads[i].isAlive()) {
+            fail("thread failed before indexing a single document");
+          }
+      }
+
+      writer.close(false);
+
+      // Make sure threads that are adding docs are not hung:
+      for(int i=0;i<NUM_THREADS;i++) {
+        // Without fix for LUCENE-1130: one of the
+        // threads will hang
+        threads[i].join();
+        if (threads[i].isAlive())
+          fail("thread seems to be hung");
+      }
+
+      // Quick test to make sure index is not corrupt:
+      IndexReader reader = IndexReader.open(dir, true);
+      TermDocs tdocs = reader.termDocs(new Term("field", "aaa"));
+      int count = 0;
+      while(tdocs.next()) {
+        count++;
+      }
+      assertTrue(count > 0);
+      reader.close();
+      
+      dir.close();
+    }
+  }
+
+  // Runs test, with multiple threads, using the specific
+  // failure to trigger an IOException
+  public void _testMultipleThreadsFailure(MockDirectoryWrapper.Failure failure) throws Exception {
+
+    int NUM_THREADS = 3;
+
+    for(int iter=0;iter<2;iter++) {
+      if (VERBOSE) {
+        System.out.println("TEST: iter=" + iter);
+      }
+      MockDirectoryWrapper dir = newDirectory();
+      IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT,
+          new MockAnalyzer(random)).setMaxBufferedDocs(2)
+          .setMergeScheduler(new ConcurrentMergeScheduler())
+          .setMergePolicy(newLogMergePolicy(4));
+      // We expect disk full exceptions in the merge threads
+      ((ConcurrentMergeScheduler) conf.getMergeScheduler()).setSuppressExceptions();
+      IndexWriter writer = new IndexWriter(dir, conf);
+      writer.setInfoStream(VERBOSE ? System.out : null);
+      
+      IndexerThread[] threads = new IndexerThread[NUM_THREADS];
+
+      for(int i=0;i<NUM_THREADS;i++)
+        threads[i] = new IndexerThread(writer, true);
+
+      for(int i=0;i<NUM_THREADS;i++)
+        threads[i].start();
+
+      Thread.sleep(10);
+
+      dir.failOn(failure);
+      failure.setDoFail();
+
+      for(int i=0;i<NUM_THREADS;i++) {
+        threads[i].join();
+        assertTrue("hit unexpected Throwable", threads[i].error == null);
+      }
+
+      boolean success = false;
+      try {
+        writer.close(false);
+        success = true;
+      } catch (IOException ioe) {
+        failure.clearDoFail();
+        writer.close(false);
+      }
+
+      if (success) {
+        IndexReader reader = IndexReader.open(dir, true);
+        for(int j=0;j<reader.maxDoc();j++) {
+          if (!reader.isDeleted(j)) {
+            reader.document(j);
+            reader.getTermFreqVectors(j);
+          }
+        }
+        reader.close();
+      }
+
+      dir.close();
+    }
+  }
+
+  // Runs test, with one thread, using the specific failure
+  // to trigger an IOException
+  public void _testSingleThreadFailure(MockDirectoryWrapper.Failure failure) throws IOException {
+    MockDirectoryWrapper dir = newDirectory();
+
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))
+      .setMaxBufferedDocs(2).setMergeScheduler(new ConcurrentMergeScheduler()));
+    final Document doc = new Document();
+    doc.add(newField("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+
+    for(int i=0;i<6;i++)
+      writer.addDocument(doc);
+
+    dir.failOn(failure);
+    failure.setDoFail();
+    try {
+      writer.addDocument(doc);
+      writer.addDocument(doc);
+      writer.commit();
+      fail("did not hit exception");
+    } catch (IOException ioe) {
+    }
+    failure.clearDoFail();
+    writer.addDocument(doc);
+    writer.close(false);
+    dir.close();
+  }
+
+  // Throws IOException during FieldsWriter.flushDocument and during DocumentsWriter.abort
+  private static class FailOnlyOnAbortOrFlush extends MockDirectoryWrapper.Failure {
+    private boolean onlyOnce;
+    public FailOnlyOnAbortOrFlush(boolean onlyOnce) {
+      this.onlyOnce = onlyOnce;
+    }
+    @Override
+    public void eval(MockDirectoryWrapper dir)  throws IOException {
+      if (doFail) {
+        StackTraceElement[] trace = new Exception().getStackTrace();
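+        // Walk the current stack: fail only while abort() or flushDocument() is in
+        // progress, and never once close() has been entered.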
+        boolean sawAbortOrFlushDoc = false;
+        boolean sawClose = false;
+        for (int i = 0; i < trace.length; i++) {
+          if ("abort".equals(trace[i].getMethodName()) ||
+              "flushDocument".equals(trace[i].getMethodName())) {
+            sawAbortOrFlushDoc = true;
+          }
+          if ("close".equals(trace[i].getMethodName())) {
+            sawClose = true;
+          }
+        }
+        if (sawAbortOrFlushDoc && !sawClose) {
+          if (onlyOnce)
+            doFail = false;
+          //System.out.println(Thread.currentThread().getName() + ": now fail");
+          //new Throwable().printStackTrace(System.out);
+          throw new IOException("now failing on purpose");
+        }
+      }
+    }
+  }
+
+
+
+  // LUCENE-1130: make sure initial IOException, and then 2nd
+  // IOException during rollback(), is OK:
+  public void testIOExceptionDuringAbort() throws IOException {
+    _testSingleThreadFailure(new FailOnlyOnAbortOrFlush(false));
+  }
+
+  // LUCENE-1130: make sure initial IOException, and then 2nd
+  // IOException during rollback(), is OK:
+  public void testIOExceptionDuringAbortOnlyOnce() throws IOException {
+    _testSingleThreadFailure(new FailOnlyOnAbortOrFlush(true));
+  }
+
+  // LUCENE-1130: make sure initial IOException, and then 2nd
+  // IOException during rollback(), with multiple threads, is OK:
+  public void testIOExceptionDuringAbortWithThreads() throws Exception {
+    _testMultipleThreadsFailure(new FailOnlyOnAbortOrFlush(false));
+  }
+
+  // LUCENE-1130: make sure initial IOException, and then 2nd
+  // IOException during rollback(), with multiple threads, is OK:
+  public void testIOExceptionDuringAbortWithThreadsOnlyOnce() throws Exception {
+    _testMultipleThreadsFailure(new FailOnlyOnAbortOrFlush(true));
+  }
+
+  // Throws IOException during DocumentsWriter.writeSegment
+  private static class FailOnlyInWriteSegment extends MockDirectoryWrapper.Failure {
+    private boolean onlyOnce;
+    public FailOnlyInWriteSegment(boolean onlyOnce) {
+      this.onlyOnce = onlyOnce;
+    }
+    @Override
+    public void eval(MockDirectoryWrapper dir)  throws IOException {
+      if (doFail) {
+        StackTraceElement[] trace = new Exception().getStackTrace();
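+        // Fail only when the write originates from DocFieldProcessor.flush, i.e. while
+        // a segment is being written.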
+        for (int i = 0; i < trace.length; i++) {
+          if ("flush".equals(trace[i].getMethodName()) && "org.apache.lucene.index.DocFieldProcessor".equals(trace[i].getClassName())) {
+            if (onlyOnce)
+              doFail = false;
+            throw new IOException("now failing on purpose");
+          }
+        }
+      }
+    }
+  }
+
+  // LUCENE-1130: test IOException in writeSegment
+  public void testIOExceptionDuringWriteSegment() throws IOException {
+    _testSingleThreadFailure(new FailOnlyInWriteSegment(false));
+  }
+
+  // LUCENE-1130: test IOException in writeSegment
+  public void testIOExceptionDuringWriteSegmentOnlyOnce() throws IOException {
+    _testSingleThreadFailure(new FailOnlyInWriteSegment(true));
+  }
+
+  // LUCENE-1130: test IOException in writeSegment, with threads
+  public void testIOExceptionDuringWriteSegmentWithThreads() throws Exception {
+    _testMultipleThreadsFailure(new FailOnlyInWriteSegment(false));
+  }
+
+  // LUCENE-1130: test IOException in writeSegment, with threads
+  public void testIOExceptionDuringWriteSegmentWithThreadsOnlyOnce() throws Exception {
+    _testMultipleThreadsFailure(new FailOnlyInWriteSegment(true));
+  }
+  
+  //  LUCENE-3365: Test adding two documents with the same field from two different IndexWriters 
+  //  that we attempt to open at the same time.  As long as the first IndexWriter completes
+  //  and closes before the second IndexWriter times out trying to get the Lock,
+  //  we should see both documents
+  public void testOpenTwoIndexWritersOnDifferentThreads() throws IOException, InterruptedException {
+     final MockDirectoryWrapper dir = newDirectory();
+     CountDownLatch oneIWConstructed = new CountDownLatch(1);
+     DelayedIndexAndCloseRunnable thread1 = new DelayedIndexAndCloseRunnable(
+         dir, oneIWConstructed);
+     DelayedIndexAndCloseRunnable thread2 = new DelayedIndexAndCloseRunnable(
+         dir, oneIWConstructed);
+
+     thread1.start();
+     thread2.start();
+     oneIWConstructed.await();
+
+     thread1.startIndexing();
+     thread2.startIndexing();
+
+     thread1.join();
+     thread2.join();
+     
+     assertFalse("Failed due to: " + thread1.failure, thread1.failed);
+     assertFalse("Failed due to: " + thread2.failure, thread2.failed);
+     // now verify that we have two documents in the index
+     IndexReader reader = IndexReader.open(dir, true);
+     assertEquals("IndexReader should have one document per thread running", 2,
+         reader.numDocs());
+     
+     reader.close();
+     dir.close();
+  }
+  
+   static class DelayedIndexAndCloseRunnable extends Thread {
+     private final Directory dir;
+     boolean failed = false;
+     Throwable failure = null;
+     private final CountDownLatch startIndexing = new CountDownLatch(1);
+     private CountDownLatch iwConstructed;
+
+     public DelayedIndexAndCloseRunnable(Directory dir,
+         CountDownLatch iwConstructed) {
+       this.dir = dir;
+       this.iwConstructed = iwConstructed;
+     }
+
+     public void startIndexing() {
+       this.startIndexing.countDown();
+     }
+
+     @Override
+     public void run() {
+       try {
+         Document doc = new Document();
+         Field field = newField("field", "testData", Field.Store.YES,
+             Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
+         doc.add(field);
+         IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
+             TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+         iwConstructed.countDown();
+         startIndexing.await();
+         writer.addDocument(doc);
+         writer.close();
+       } catch (Throwable e) {
+         failed = true;
+         failure = e;
+         failure.printStackTrace(System.out);
+         return;
+       }
+     }
+   }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestIsCurrent.java b/lucene/backwards/src/test/org/apache/lucene/index/TestIsCurrent.java
new file mode 100644
index 0000000..524108d
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestIsCurrent.java
@@ -0,0 +1,110 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field.Index;
+import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.util.*;
+import org.apache.lucene.store.*;
+
+import org.junit.Test;
+
+import java.io.IOException;
+
+public class TestIsCurrent extends LuceneTestCase {
+
+  private RandomIndexWriter writer;
+
+  private Directory directory;
+
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+
+    // initialize directory
+    directory = newDirectory();
+    writer = new RandomIndexWriter(random, directory);
+
+    // write document
+    Document doc = new Document();
+    doc.add(newField("UUID", "1", Store.YES, Index.ANALYZED));
+    writer.addDocument(doc);
+    writer.commit();
+  }
+
+  @Override
+  public void tearDown() throws Exception {
+    super.tearDown();
+    writer.close();
+    directory.close();
+  }
+
+  /**
+   * Testcase showing that deleting a document by term makes a previously opened reader stale
+   * 
+   * @throws IOException
+   */
+  @Test
+  public void testDeleteByTermIsCurrent() throws IOException {
+
+    // get reader
+    IndexReader reader = writer.getReader();
+
+    // assert index has a document and reader is up to date
+    assertEquals("One document should be in the index", 1, writer.numDocs());
+    assertTrue("One document added, reader should be current", reader.isCurrent());
+
+    // remove document
+    Term idTerm = new Term("UUID", "1");
+    writer.deleteDocuments(idTerm);
+    writer.commit();
+
+    // assert document has been deleted (index changed), reader is stale
+    assertEquals("Document should be removed", 0, writer.numDocs());
+    assertFalse("Reader should be stale", reader.isCurrent());
+
+    reader.close();
+  }
+
+  /**
+   * Testcase showing that writer.deleteAll() works as expected
+   * 
+   * @throws IOException
+   */
+  @Test
+  public void testDeleteAllIsCurrent() throws IOException {
+
+    // get reader
+    IndexReader reader = writer.getReader();
+
+    // assert index has a document and reader is up to date
+    assertEquals("One document should be in the index", 1, writer.numDocs());
+    assertTrue("Document added, reader should be stale ", reader.isCurrent());
+
+    // remove all documents
+    writer.deleteAll();
+    writer.commit();
+
+    // assert document has been deleted (index changed), reader is stale
+    assertEquals("Document should be removed", 0, writer.numDocs());
+    assertFalse("Reader should be stale", reader.isCurrent());
+
+    reader.close();
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestLazyBug.java b/lucene/backwards/src/test/org/apache/lucene/index/TestLazyBug.java
new file mode 100755
index 0000000..9dd371a
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestLazyBug.java
@@ -0,0 +1,147 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldSelector;
+import org.apache.lucene.document.FieldSelectorResult;
+import org.apache.lucene.document.Fieldable;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+
+
+/**
+ * Test demonstrating EOF bug on the last field of the last doc 
+ * if other docs have already been accessed.
+ */
+public class TestLazyBug extends LuceneTestCase {
+
+  public static int NUM_DOCS = TEST_NIGHTLY ? 500 : 50;
+  public static int NUM_FIELDS = TEST_NIGHTLY ? 100 : 10;
+
+  private static String[] data = new String[] {
+    "now",
+    "is the time",
+    "for all good men",
+    "to come to the aid",
+    "of their country!",
+    "this string contains big chars:{\u0111 \u0222 \u0333 \u1111 \u2222 \u3333}",
+    "this string is a bigger string, mary had a little lamb, little lamb, little lamb!"
+  };
+
+  private static Set<String> dataset = new HashSet<String>(Arrays.asList(data));
+  
+  private static String MAGIC_FIELD = "f"+(NUM_FIELDS/3);
+  
+  private static Directory directory;
+  
+  @BeforeClass
+  public static void beforeClass() throws Exception {
+    directory = makeIndex();
+  }
+  
+  @AfterClass
+  public static void afterClass() throws Exception {
+    directory.close();
+    directory = null;
+  }
+
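+  // Load only MAGIC_FIELD eagerly; every other field is lazy-loaded, which is the
+  // code path the EOF bug described above exercised.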
+  private static FieldSelector SELECTOR = new FieldSelector() {
+      public FieldSelectorResult accept(String f) {
+        if (f.equals(MAGIC_FIELD)) {
+          return FieldSelectorResult.LOAD;
+        }
+        return FieldSelectorResult.LAZY_LOAD;
+      }
+    };
+
+  private static Directory makeIndex() throws Exception {
+    Directory dir = newDirectory();
+    try {
+      IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
+          TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
+      LogMergePolicy lmp = (LogMergePolicy) writer.getConfig().getMergePolicy();
+      lmp.setUseCompoundFile(false);
+
+      for (int d = 1; d <= NUM_DOCS; d++) {
+        Document doc = new Document();
+        for (int f = 1; f <= NUM_FIELDS; f++ ) {
+          doc.add(newField("f"+f,
+                            data[f % data.length]
+                            + '#' + data[random.nextInt(data.length)],
+                            Field.Store.NO,
+                            Field.Index.ANALYZED));
+        }
+        writer.addDocument(doc);
+      }
+      writer.close();
+    } catch (Exception e) {
+      throw new RuntimeException(e);
+    }
+    return dir;
+  }
+  
+  public void doTest(int[] docs) throws Exception {
+    IndexReader reader = IndexReader.open(directory, true);
+    for (int i = 0; i < docs.length; i++) {
+      Document d = reader.document(docs[i], SELECTOR);
+      d.get(MAGIC_FIELD);
+      
+      List<Fieldable> fields = d.getFields();
+      for (Iterator<Fieldable> fi = fields.iterator(); fi.hasNext(); ) {
+        Fieldable f=null;
+        try {
+          f =  fi.next();
+          String fname = f.name();
+          String fval = f.stringValue();
+          assertNotNull(docs[i]+" FIELD: "+fname, fval);
+          String[] vals = fval.split("#");
+          if (!dataset.contains(vals[0]) || !dataset.contains(vals[1])) {        
+            fail("FIELD:"+fname+",VAL:"+fval);
+          }
+        } catch (Exception e) {
+          throw new Exception(docs[i]+" WTF: "+f.name(), e);
+        }
+      }
+    }
+    reader.close();
+  }
+
+  public void testLazyWorks() throws Exception {
+    doTest(new int[] { NUM_DOCS-1 });
+  }
+  
+  public void testLazyAlsoWorks() throws Exception {
+    doTest(new int[] { NUM_DOCS-1, NUM_DOCS/2 });
+  }
+
+  public void testLazyBroken() throws Exception {
+    doTest(new int[] { NUM_DOCS/2, NUM_DOCS-1 });
+  }
+
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestLazyProxSkipping.java b/lucene/backwards/src/test/org/apache/lucene/index/TestLazyProxSkipping.java
new file mode 100755
index 0000000..dcdce4b
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestLazyProxSkipping.java
@@ -0,0 +1,209 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.WhitespaceAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.search.PhraseQuery;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.Searcher;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.store.MockDirectoryWrapper;
+import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.LuceneTestCase;
+
+/**
+ * Tests lazy skipping on the proximity file.
+ *
+ */
+public class TestLazyProxSkipping extends LuceneTestCase {
+    private Searcher searcher;
+    private int seeksCounter = 0;
+    
+    private String field = "tokens";
+    private String term1 = "xx";
+    private String term2 = "yy";
+    private String term3 = "zz";
+
+    private class SeekCountingDirectory extends MockDirectoryWrapper {
+      public SeekCountingDirectory(Directory delegate) {
+        super(random, delegate);
+      }
+
+      @Override
+      public IndexInput openInput(String name) throws IOException {
+        IndexInput ii = super.openInput(name);
+        if (name.endsWith(".prx")) {
+          // we decorate the proxStream with a wrapper class that lets us count the number of seek() calls
+          ii = new SeeksCountingStream(ii);
+        }
+        return ii;
+      }
+      
+    }
+    
+    private void createIndex(int numHits) throws IOException {
+        int numDocs = 500;
+        
+        Directory directory = new SeekCountingDirectory(new RAMDirectory());
+        // note: test explicitly disables payloads
+        IndexWriter writer = new IndexWriter(
+            directory,
+            newIndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).
+                setMaxBufferedDocs(10).
+                setMergePolicy(newLogMergePolicy(false))
+        );
+        for (int i = 0; i < numDocs; i++) {
+            Document doc = new Document();
+            String content;
+            if (i % (numDocs / numHits) == 0) {
+                // add a document that matches the query "term1 term2"
+                content = this.term1 + " " + this.term2;
+            } else if (i % 15 == 0) {
+                // add a document that only contains term1
+                content = this.term1 + " " + this.term1;
+            } else {
+                // add a document that contains term2 but not term1
+                content = this.term3 + " " + this.term2;
+            }
+
+            doc.add(newField(this.field, content, Field.Store.YES, Field.Index.ANALYZED));
+            writer.addDocument(doc);
+        }
+        
+        // make sure the index has only a single segment
+        writer.optimize();
+        writer.close();
+        
+        SegmentReader reader = SegmentReader.getOnlySegmentReader(directory);
+
+        this.searcher = newSearcher(reader);
+    }
+    
+    private ScoreDoc[] search() throws IOException {
+        // create PhraseQuery "term1 term2" and search
+        PhraseQuery pq = new PhraseQuery();
+        pq.add(new Term(this.field, this.term1));
+        pq.add(new Term(this.field, this.term2));
+        return this.searcher.search(pq, null, 1000).scoreDocs;        
+    }
+    
+    private void performTest(int numHits) throws IOException {
+        createIndex(numHits);
+        this.seeksCounter = 0;
+        ScoreDoc[] hits = search();
+        // verify that the right number of docs was found
+        assertEquals(numHits, hits.length);
+        
+        // check that the number of seek() calls does not exceed the number of hits
+        assertTrue(this.seeksCounter > 0);
+        assertTrue(this.seeksCounter <= numHits + 1);
+    }
+    
+    public void testLazySkipping() throws IOException {
+        // test that only the minimum number of seek() calls
+        // is performed
+        performTest(5);
+        searcher.close();
+        performTest(10);
+        searcher.close();
+    }
+    
+    public void testSeek() throws IOException {
+        Directory directory = newDirectory();
+        IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+        for (int i = 0; i < 10; i++) {
+            Document doc = new Document();
+            doc.add(newField(this.field, "a b", Field.Store.YES, Field.Index.ANALYZED));
+            writer.addDocument(doc);
+        }
+        
+        writer.close();
+        IndexReader reader = IndexReader.open(directory, true);
+        TermPositions tp = reader.termPositions();
+        tp.seek(new Term(this.field, "b"));
+        for (int i = 0; i < 10; i++) {
+            tp.next();
+            assertEquals(tp.doc(), i);
+            assertEquals(tp.nextPosition(), 1);
+        }
+        tp.seek(new Term(this.field, "a"));
+        for (int i = 0; i < 10; i++) {
+            tp.next();
+            assertEquals(tp.doc(), i);
+            assertEquals(tp.nextPosition(), 0);
+        }
+        reader.close();
+        directory.close();
+        
+    }
+    
+
+    // Simply extends IndexInput so that we can count the number
+    // of invocations of seek()
+    class SeeksCountingStream extends IndexInput {
+          private IndexInput input;      
+          
+          
+          SeeksCountingStream(IndexInput input) {
+              this.input = input;
+          }      
+                
+          @Override
+          public byte readByte() throws IOException {
+              return this.input.readByte();
+          }
+    
+          @Override
+          public void readBytes(byte[] b, int offset, int len) throws IOException {
+              this.input.readBytes(b, offset, len);        
+          }
+    
+          @Override
+          public void close() throws IOException {
+              this.input.close();
+          }
+    
+          @Override
+          public long getFilePointer() {
+              return this.input.getFilePointer();
+          }
+    
+          @Override
+          public void seek(long pos) throws IOException {
+              TestLazyProxSkipping.this.seeksCounter++;
+              this.input.seek(pos);
+          }
+    
+          @Override
+          public long length() {
+              return this.input.length();
+          }
+          
+          @Override
+          public Object clone() {
+              return new SeeksCountingStream((IndexInput) this.input.clone());
+          }
+      
+    }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestLongPostings.java b/lucene/backwards/src/test/org/apache/lucene/index/TestLongPostings.java
new file mode 100644
index 0000000..ee68c9f
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestLongPostings.java
@@ -0,0 +1,456 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.io.StringReader;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.FieldInfo.IndexOptions;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.FixedBitSet;
+import org.apache.lucene.util._TestUtil;
+
+public class TestLongPostings extends LuceneTestCase {
+
+  // Produces a realistic unicode random string that
+  // survives MockAnalyzer unchanged:
+  private String getRandomTerm(String other) throws IOException {
+    Analyzer a = new MockAnalyzer(random);
+    while(true) {
+      String s = _TestUtil.randomRealisticUnicodeString(random);
+      if (other != null && s.equals(other)) {
+        continue;
+      }
+      final TokenStream ts = a.tokenStream("foo", new StringReader(s));
+      final TermAttribute termAtt = ts.getAttribute(TermAttribute.class);
+      int count = 0;
+      ts.reset();
+      while(ts.incrementToken()) {
+        if (count == 0 && !termAtt.term().equals(s)) {
+          break;
+        }
+        count++;
+      }
+      if (count == 1) {
+        return s;
+      }
+    }
+  }
+
+  public void testLongPostings() throws Exception {
+    // Don't use _TestUtil.getTempDir so that we own the
+    // randomness (ie same seed will point to same dir):
+    Directory dir = newFSDirectory(_TestUtil.getTempDir("longpostings" + "." + random.nextLong()));
+
+    final int NUM_DOCS = atLeast(2000);
+
+    if (VERBOSE) {
+      System.out.println("TEST: NUM_DOCS=" + NUM_DOCS);
+    }
+
+    final String s1 = getRandomTerm(null);
+    final String s2 = getRandomTerm(s1);
+
+    if (VERBOSE) {
+      System.out.println("\nTEST: s1=" + s1 + " s2=" + s2);
+      /*
+      for(int idx=0;idx<s1.length();idx++) {
+        System.out.println("  s1 ch=0x" + Integer.toHexString(s1.charAt(idx)));
+      }
+      for(int idx=0;idx<s2.length();idx++) {
+        System.out.println("  s2 ch=0x" + Integer.toHexString(s2.charAt(idx)));
+      }
+      */
+    }
+
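+    // Randomly assign each doc either s1 or s2; isS1 records the choice so the
+    // verification loops below can recompute the expected doc IDs.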
+    final FixedBitSet isS1 = new FixedBitSet(NUM_DOCS);
+    for(int idx=0;idx<NUM_DOCS;idx++) {
+      if (random.nextBoolean()) {
+        isS1.set(idx);
+      }
+    }
+
+    final IndexReader r;
+    if (true) { 
+      final IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
+        .setOpenMode(IndexWriterConfig.OpenMode.CREATE)
+        .setMergePolicy(newLogMergePolicy());
+      iwc.setRAMBufferSizeMB(16.0 + 16.0 * random.nextDouble());
+      iwc.setMaxBufferedDocs(-1);
+      final RandomIndexWriter riw = new RandomIndexWriter(random, dir, iwc);
+
+      for(int idx=0;idx<NUM_DOCS;idx++) {
+        final Document doc = new Document();
+        String s = isS1.get(idx) ? s1 : s2;
+        final Field f = newField("field", s, Field.Index.ANALYZED);
+        final int count = _TestUtil.nextInt(random, 1, 4);
+        for(int ct=0;ct<count;ct++) {
+          doc.add(f);
+        }
+        riw.addDocument(doc);
+      }
+
+      r = riw.getReader();
+      riw.close();
+    } else {
+      r = IndexReader.open(dir);
+    }
+
+    /*
+    if (VERBOSE) {
+      System.out.println("TEST: terms");
+      TermEnum termEnum = r.terms();
+      while(termEnum.next()) {
+        System.out.println("  term=" + termEnum.term() + " len=" + termEnum.term().text().length());
+        assertTrue(termEnum.docFreq() > 0);
+        System.out.println("    s1?=" + (termEnum.term().text().equals(s1)) + " s1len=" + s1.length());
+        System.out.println("    s2?=" + (termEnum.term().text().equals(s2)) + " s2len=" + s2.length());
+        final String s = termEnum.term().text();
+        for(int idx=0;idx<s.length();idx++) {
+          System.out.println("      ch=0x" + Integer.toHexString(s.charAt(idx)));
+        }
+      }
+    }
+    */
+
+    assertEquals(NUM_DOCS, r.numDocs());
+    assertTrue(r.docFreq(new Term("field", s1)) > 0);
+    assertTrue(r.docFreq(new Term("field", s2)) > 0);
+
+    final byte[] payload = new byte[100];
+
+    int num = atLeast(1000);
+    for(int iter=0;iter<num;iter++) {
+
+      final String term;
+      final boolean doS1;
+      if (random.nextBoolean()) {
+        term = s1;
+        doS1 = true;
+      } else {
+        term = s2;
+        doS1 = false;
+      }
+
+      if (VERBOSE) {
+        System.out.println("\nTEST: iter=" + iter + " doS1=" + doS1);
+      }
+        
+      final TermPositions postings = r.termPositions(new Term("field", term));
+
+      int docID = -1;
+      while(docID < Integer.MAX_VALUE) {
+        final int what = random.nextInt(3);
+        if (what == 0) {
+          if (VERBOSE) {
+            System.out.println("TEST: docID=" + docID + "; do next()");
+          }
+          // nextDoc
+          int expected = docID+1;
+          while(true) {
+            if (expected == NUM_DOCS) {
+              expected = Integer.MAX_VALUE;
+              break;
+            } else if (isS1.get(expected) == doS1) {
+              break;
+            } else {
+              expected++;
+            }
+          }
+          boolean result = postings.next();
+          if (!result) {
+            assertEquals(Integer.MAX_VALUE, expected);
+            if (VERBOSE) {
+              System.out.println("  end");
+            }
+            break;
+          } else {
+            docID = postings.doc();
+            if (VERBOSE) {
+              System.out.println("  got docID=" + docID);
+            }
+            assertEquals(expected, docID);
+
+            if (random.nextInt(6) == 3) {
+              final int freq = postings.freq();
+              assertTrue(freq >=1 && freq <= 4);
+              for(int pos=0;pos<freq;pos++) {
+                assertEquals(pos, postings.nextPosition());
+                if (random.nextBoolean() && postings.isPayloadAvailable()) {
+                  postings.getPayload(payload, 0);
+                }
+              }
+            }
+          }
+        } else {
+          // advance
+          final int targetDocID;
+          if (docID == -1) {
+            targetDocID = random.nextInt(NUM_DOCS+1);
+          } else {
+            targetDocID = docID + _TestUtil.nextInt(random, 1, NUM_DOCS - docID);
+          }
+          if (VERBOSE) {
+            System.out.println("TEST: docID=" + docID + "; do skipTo(" + targetDocID + ")");
+          }
+          int expected = targetDocID;
+          while(true) {
+            if (expected == NUM_DOCS) {
+              expected = Integer.MAX_VALUE;
+              break;
+            } else if (isS1.get(expected) == doS1) {
+              break;
+            } else {
+              expected++;
+            }
+          }
+          
+          final boolean result = postings.skipTo(targetDocID);
+          if (!result) {
+            assertEquals(Integer.MAX_VALUE, expected);
+            if (VERBOSE) {
+              System.out.println("  end");
+            }
+            break;
+          } else {
+            docID = postings.doc();
+            if (VERBOSE) {
+              System.out.println("  got docID=" + docID);
+            }
+            assertEquals(expected, docID);
+          
+            if (random.nextInt(6) == 3) {
+              final int freq = postings.freq();
+              assertTrue(freq >=1 && freq <= 4);
+              for(int pos=0;pos<freq;pos++) {
+                assertEquals(pos, postings.nextPosition());
+                if (random.nextBoolean() && postings.isPayloadAvailable()) {
+                  postings.getPayload(payload, 0);
+                }
+              }
+            }
+          }
+        }
+      }
+    }
+    r.close();
+    dir.close();
+  }
+  
+  // a weaker form of testLongPostings that doesn't check positions
+  public void testLongPostingsNoPositions() throws Exception {
+    doTestLongPostingsNoPositions(IndexOptions.DOCS_ONLY);
+    doTestLongPostingsNoPositions(IndexOptions.DOCS_AND_FREQS);
+  }
+  
+  public void doTestLongPostingsNoPositions(IndexOptions options) throws Exception {
+    // Don't use _TestUtil.getTempDir so that we own the
+    // randomness (ie same seed will point to same dir):
+    Directory dir = newFSDirectory(_TestUtil.getTempDir("longpostings" + "." + random.nextLong()));
+
+    final int NUM_DOCS = atLeast(2000);
+
+    if (VERBOSE) {
+      System.out.println("TEST: NUM_DOCS=" + NUM_DOCS);
+    }
+
+    final String s1 = getRandomTerm(null);
+    final String s2 = getRandomTerm(s1);
+
+    if (VERBOSE) {
+      System.out.println("\nTEST: s1=" + s1 + " s2=" + s2);
+      /*
+      for(int idx=0;idx<s1.length();idx++) {
+        System.out.println("  s1 ch=0x" + Integer.toHexString(s1.charAt(idx)));
+      }
+      for(int idx=0;idx<s2.length();idx++) {
+        System.out.println("  s2 ch=0x" + Integer.toHexString(s2.charAt(idx)));
+      }
+      */
+    }
+
+    final FixedBitSet isS1 = new FixedBitSet(NUM_DOCS);
+    for(int idx=0;idx<NUM_DOCS;idx++) {
+      if (random.nextBoolean()) {
+        isS1.set(idx);
+      }
+    }
+
+    final IndexReader r;
+    if (true) { 
+      final IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
+        .setOpenMode(IndexWriterConfig.OpenMode.CREATE)
+        .setMergePolicy(newLogMergePolicy());
+      iwc.setRAMBufferSizeMB(16.0 + 16.0 * random.nextDouble());
+      iwc.setMaxBufferedDocs(-1);
+      final RandomIndexWriter riw = new RandomIndexWriter(random, dir, iwc);
+
+      for(int idx=0;idx<NUM_DOCS;idx++) {
+        final Document doc = new Document();
+        String s = isS1.get(idx) ? s1 : s2;
+        final Field f = newField("field", s, Field.Index.ANALYZED);
+        f.setIndexOptions(options);
+        final int count = _TestUtil.nextInt(random, 1, 4);
+        for(int ct=0;ct<count;ct++) {
+          doc.add(f);
+        }
+        riw.addDocument(doc);
+      }
+
+      r = riw.getReader();
+      riw.close();
+    } else {
+      r = IndexReader.open(dir);
+    }
+
+    /*
+    if (VERBOSE) {
+      System.out.println("TEST: terms");
+      TermEnum termEnum = r.terms();
+      while(termEnum.next()) {
+        System.out.println("  term=" + termEnum.term() + " len=" + termEnum.term().text().length());
+        assertTrue(termEnum.docFreq() > 0);
+        System.out.println("    s1?=" + (termEnum.term().text().equals(s1)) + " s1len=" + s1.length());
+        System.out.println("    s2?=" + (termEnum.term().text().equals(s2)) + " s2len=" + s2.length());
+        final String s = termEnum.term().text();
+        for(int idx=0;idx<s.length();idx++) {
+          System.out.println("      ch=0x" + Integer.toHexString(s.charAt(idx)));
+        }
+      }
+    }
+    */
+
+    assertEquals(NUM_DOCS, r.numDocs());
+    assertTrue(r.docFreq(new Term("field", s1)) > 0);
+    assertTrue(r.docFreq(new Term("field", s2)) > 0);
+
+    final byte[] payload = new byte[100];
+
+    int num = atLeast(1000);
+    for(int iter=0;iter<num;iter++) {
+
+      final String term;
+      final boolean doS1;
+      if (random.nextBoolean()) {
+        term = s1;
+        doS1 = true;
+      } else {
+        term = s2;
+        doS1 = false;
+      }
+
+      if (VERBOSE) {
+        System.out.println("\nTEST: iter=" + iter + " doS1=" + doS1);
+      }
+        
+      final TermDocs postings = r.termDocs(new Term("field", term));
+
+      int docID = -1;
+      while(docID < Integer.MAX_VALUE) {
+        final int what = random.nextInt(3);
+        if (what == 0) {
+          if (VERBOSE) {
+            System.out.println("TEST: docID=" + docID + "; do next()");
+          }
+          // nextDoc
+          int expected = docID+1;
+          while(true) {
+            if (expected == NUM_DOCS) {
+              expected = Integer.MAX_VALUE;
+              break;
+            } else if (isS1.get(expected) == doS1) {
+              break;
+            } else {
+              expected++;
+            }
+          }
+          boolean result = postings.next();
+          if (!result) {
+            assertEquals(Integer.MAX_VALUE, expected);
+            if (VERBOSE) {
+              System.out.println("  end");
+            }
+            break;
+          } else {
+            docID = postings.doc();
+            if (VERBOSE) {
+              System.out.println("  got docID=" + docID);
+            }
+            assertEquals(expected, docID);
+
+            if (random.nextInt(6) == 3) {
+              final int freq = postings.freq();
+              assertTrue(freq >=1 && freq <= 4);
+            }
+          }
+        } else {
+          // advance
+          final int targetDocID;
+          if (docID == -1) {
+            targetDocID = random.nextInt(NUM_DOCS+1);
+          } else {
+            targetDocID = docID + _TestUtil.nextInt(random, 1, NUM_DOCS - docID);
+          }
+          if (VERBOSE) {
+            System.out.println("TEST: docID=" + docID + "; do skipTo(" + targetDocID + ")");
+          }
+          int expected = targetDocID;
+          while(true) {
+            if (expected == NUM_DOCS) {
+              expected = Integer.MAX_VALUE;
+              break;
+            } else if (isS1.get(expected) == doS1) {
+              break;
+            } else {
+              expected++;
+            }
+          }
+          
+          final boolean result = postings.skipTo(targetDocID);
+          if (!result) {
+            assertEquals(Integer.MAX_VALUE, expected);
+            if (VERBOSE) {
+              System.out.println("  end");
+            }
+            break;
+          } else {
+            docID = postings.doc();
+            if (VERBOSE) {
+              System.out.println("  got docID=" + docID);
+            }
+            assertEquals(expected, docID);
+          
+            if (random.nextInt(6) == 3) {
+              final int freq = postings.freq();
+              assertTrue(freq >=1 && freq <= 4);
+            }
+          }
+        }
+      }
+    }
+    r.close();
+    dir.close();
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestMaxTermFrequency.java b/lucene/backwards/src/test/org/apache/lucene/index/TestMaxTermFrequency.java
new file mode 100644
index 0000000..5dcdfee
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestMaxTermFrequency.java
@@ -0,0 +1,116 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.MockTokenizer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.search.DefaultSimilarity;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util._TestUtil;
+
+/**
+ * Tests the maxTermFrequency statistic in FieldInvertState
+ */
+public class TestMaxTermFrequency extends LuceneTestCase { 
+  Directory dir;
+  IndexReader reader;
+  /* expected maxTermFrequency values for our documents */
+  ArrayList<Integer> expected = new ArrayList<Integer>();
+  
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    dir = newDirectory();
+    IndexWriterConfig config = newIndexWriterConfig(TEST_VERSION_CURRENT, 
+               new MockAnalyzer(random, MockTokenizer.SIMPLE, true)).setMergePolicy(newLogMergePolicy());
+    config.setSimilarity(new TestSimilarity());
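+    // TestSimilarity stores the maximum term frequency directly in the norm byte,
+    // so the norms read back in test() equal the expected per-document values.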
+    RandomIndexWriter writer = new RandomIndexWriter(random, dir, config);
+    Document doc = new Document();
+    Field foo = newField("foo", "", Field.Store.NO, Field.Index.ANALYZED);
+    doc.add(foo);
+    for (int i = 0; i < 100; i++) {
+      foo.setValue(addValue());
+      writer.addDocument(doc);
+    }
+    reader = writer.getReader();
+    writer.close();
+  }
+  
+  @Override
+  public void tearDown() throws Exception {
+    reader.close();
+    dir.close();
+    super.tearDown();
+  }
+  
+  public void test() throws Exception {
+    byte fooNorms[] = reader.norms("foo");
+    for (int i = 0; i < reader.maxDoc(); i++)
+      assertEquals(expected.get(i).intValue(), fooNorms[i] & 0xff);
+  }
+
+  /**
+   * Makes a bunch of single-char tokens (the max freq will be at most 255),
+   * shuffles them around, and returns the whole list with Arrays.toString().
+   * This works fine because we use a letter tokenizer.
+   * Puts the max-frequency term into expected, to be checked against the norm.
+   */
+  private String addValue() {
+    List<String> terms = new ArrayList<String>();
+    int maxCeiling = _TestUtil.nextInt(random, 0, 255);
+    int max = 0;
+    for (char ch = 'a'; ch <= 'z'; ch++) {
+      int num = _TestUtil.nextInt(random, 0, maxCeiling);
+      for (int i = 0; i < num; i++)
+        terms.add(Character.toString(ch));
+      max = Math.max(max, num);
+    }
+    expected.add(max);
+    Collections.shuffle(terms, random);
+    return Arrays.toString(terms.toArray(new String[terms.size()]));
+  }
+  
+  /**
+   * Simple similarity that encodes maxTermFrequency directly as a byte
+   */
+  class TestSimilarity extends DefaultSimilarity {
+
+    @Override
+    public byte encodeNormValue(float f) {
+      return (byte) f;
+    }
+    
+    @Override
+    public float decodeNormValue(byte b) {
+      return (float) b;
+    }
+
+    @Override
+    public float computeNorm(String field, FieldInvertState state) {
+      return (float) state.getMaxTermFrequency();
+    }
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java b/lucene/backwards/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java
new file mode 100644
index 0000000..2f9ea31
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java
@@ -0,0 +1,173 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.io.Reader;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.LowerCaseTokenizer;
+import org.apache.lucene.analysis.TokenFilter;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field.Index;
+import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.LuceneTestCase;
+
+/**
+ * This testcase tests whether multi-level skipping is being used
+ * to reduce I/O while skipping through posting lists.
+ * 
+ * Skipping in general is already covered by several other
+ * testcases.
+ * 
+ */
+public class TestMultiLevelSkipList extends LuceneTestCase {
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    PayloadFilter.count = 0;
+  }
+
+  public void testSimpleSkip() throws IOException {
+    RAMDirectory dir = new RAMDirectory();
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new PayloadAnalyzer()).setMergePolicy(newLogMergePolicy()));
+    Term term = new Term("test", "a");
+    for (int i = 0; i < 5000; i++) {
+      Document d1 = new Document();
+      d1.add(newField(term.field(), term.text(), Store.NO, Index.ANALYZED));
+      writer.addDocument(d1);
+    }
+    writer.commit();
+    writer.optimize();
+    writer.close();
+
+    IndexReader reader = SegmentReader.getOnlySegmentReader(dir);
+    SegmentTermPositions tp = (SegmentTermPositions) reader.termPositions();
+    tp.freqStream = new CountingStream(tp.freqStream);
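+    // From here on every byte read from the freq stream is counted, and checkSkipTo()
+    // asserts an upper bound on that count for each skip target.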
+
+    for (int i = 0; i < 2; i++) {
+      counter = 0;
+      tp.seek(term);
+
+      checkSkipTo(tp, 14, 185); // no skips
+      checkSkipTo(tp, 17, 190); // one skip on level 0
+      checkSkipTo(tp, 287, 200); // one skip on level 1, two on level 0
+    
+      // this test would fail if we had only one skip level,
+      // because then more bytes would be read from the freqStream
+      checkSkipTo(tp, 4800, 250);// one skip on level 2
+    }
+  }
+
+  public void checkSkipTo(TermPositions tp, int target, int maxCounter) throws IOException {
+    tp.skipTo(target);
+    if (maxCounter < counter) {
+      fail("Too many bytes read: " + counter + " vs " + maxCounter);
+    }
+
+    assertEquals("Wrong document " + tp.doc() + " after skipTo target " + target, target, tp.doc());
+    assertEquals("Frequency is not 1: " + tp.freq(), 1,tp.freq());
+    tp.nextPosition();
+    byte[] b = new byte[1];
+    tp.getPayload(b, 0);
+    assertEquals("Wrong payload for the target " + target + ": " + b[0], (byte) target, b[0]);
+  }
+
+  private static class PayloadAnalyzer extends Analyzer {
+    @Override
+    public TokenStream tokenStream(String fieldName, Reader reader) {
+      return new PayloadFilter(new LowerCaseTokenizer(TEST_VERSION_CURRENT, reader));
+    }
+
+  }
+
+  private static class PayloadFilter extends TokenFilter {
+    static int count = 0;
+    
+    PayloadAttribute payloadAtt;
+    
+    protected PayloadFilter(TokenStream input) {
+      super(input);
+      payloadAtt = addAttribute(PayloadAttribute.class);
+    }
+
+    @Override
+    public boolean incrementToken() throws IOException {
+      boolean hasNext = input.incrementToken();
+      if (hasNext) {
+        payloadAtt.setPayload(new Payload(new byte[] { (byte) count++ }));
+      } 
+      return hasNext;
+    }
+
+  }
+
+  private int counter = 0;
+
+  // Simply extends IndexInput so that we can count the number
+  // of bytes read
+  class CountingStream extends IndexInput {
+    private IndexInput input;
+
+    CountingStream(IndexInput input) {
+      this.input = input;
+    }
+
+    @Override
+    public byte readByte() throws IOException {
+      TestMultiLevelSkipList.this.counter++;
+      return this.input.readByte();
+    }
+
+    @Override
+    public void readBytes(byte[] b, int offset, int len) throws IOException {
+      TestMultiLevelSkipList.this.counter += len;
+      this.input.readBytes(b, offset, len);
+    }
+
+    @Override
+    public void close() throws IOException {
+      this.input.close();
+    }
+
+    @Override
+    public long getFilePointer() {
+      return this.input.getFilePointer();
+    }
+
+    @Override
+    public void seek(long pos) throws IOException {
+      this.input.seek(pos);
+    }
+
+    @Override
+    public long length() {
+      return this.input.length();
+    }
+
+    @Override
+    public Object clone() {
+      return new CountingStream((IndexInput) this.input.clone());
+    }
+
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestMultiReader.java b/lucene/backwards/src/test/org/apache/lucene/index/TestMultiReader.java
new file mode 100644
index 0000000..c934b4d
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestMultiReader.java
@@ -0,0 +1,44 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+public class TestMultiReader extends TestDirectoryReader {
+
+  @Override
+  protected IndexReader openReader() throws IOException {
+    IndexReader reader;
+
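+    // sis and dir come from TestDirectoryReader's setUp, which is expected to
+    // have created two segments; open each one directly and combine them in a
+    // MultiReader.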
+    sis.read(dir);
+    SegmentReader reader1 = SegmentReader.get(false, sis.info(0), IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
+    SegmentReader reader2 = SegmentReader.get(false, sis.info(1), IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
+    readers[0] = reader1;
+    readers[1] = reader2;
+    assertTrue(reader1 != null);
+    assertTrue(reader2 != null);
+
+    reader = new MultiReader(readers);
+
+    assertTrue(dir != null);
+    assertTrue(sis != null);
+    
+    return reader;
+  }
+
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestNRTReaderWithThreads.java b/lucene/backwards/src/test/org/apache/lucene/index/TestNRTReaderWithThreads.java
new file mode 100644
index 0000000..0798b1a
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestNRTReaderWithThreads.java
@@ -0,0 +1,116 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Random;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+
+public class TestNRTReaderWithThreads extends LuceneTestCase {
+  AtomicInteger seq = new AtomicInteger(1);
+
+  public void testIndexing() throws Exception {
+    Directory mainDir = newDirectory();
+    IndexWriter writer = new IndexWriter(
+        mainDir,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+            setMaxBufferedDocs(10).
+            setMergePolicy(newLogMergePolicy(false,2))
+    );
+    writer.setInfoStream(VERBOSE ? System.out : null);
+    IndexReader reader = writer.getReader(); // start pooling readers
+    reader.close();
+    RunThread[] indexThreads = new RunThread[4];
+    for (int x=0; x < indexThreads.length; x++) {
+      indexThreads[x] = new RunThread(x % 2, writer);
+      indexThreads[x].setName("Thread " + x);
+      indexThreads[x].start();
+    }    
+    long startTime = System.currentTimeMillis();
+    long duration = 1000;
+    while ((System.currentTimeMillis() - startTime) < duration) {
+      Thread.sleep(100);
+    }
+    int delCount = 0;
+    int addCount = 0;
+    for (int x=0; x < indexThreads.length; x++) {
+      indexThreads[x].run = false;
+      assertNull("Exception thrown: "+indexThreads[x].ex, indexThreads[x].ex);
+      addCount += indexThreads[x].addCount;
+      delCount += indexThreads[x].delCount;
+    }
+    for (int x=0; x < indexThreads.length; x++) {
+      indexThreads[x].join();
+    }
+    for (int x=0; x < indexThreads.length; x++) {
+      assertNull("Exception thrown: "+indexThreads[x].ex, indexThreads[x].ex);
+    }
+    //System.out.println("addCount:"+addCount);
+    //System.out.println("delCount:"+delCount);
+    writer.close();
+    mainDir.close();
+  }
+
+  public class RunThread extends Thread {
+    IndexWriter writer;
+    volatile boolean run = true;
+    volatile Throwable ex;
+    int delCount = 0;
+    int addCount = 0;
+    int type;
+    final Random r = new Random(random.nextLong());
+    
+    public RunThread(int type, IndexWriter writer) {
+      this.type = type;
+      this.writer = writer;
+    }
+
+    @Override
+    public void run() {
+      try {
+        while (run) {
+          //int n = random.nextInt(2);
+          if (type == 0) {
+            int i = seq.addAndGet(1);
+            Document doc = DocHelper.createDocument(i, "index1", 10);
+            writer.addDocument(doc);
+            addCount++;
+          } else if (type == 1) {
+            // we may or may not delete because the term may not exist,
+            // however we're opening and closing the reader rapidly
+            IndexReader reader = writer.getReader();
+            int id = r.nextInt(seq.intValue());
+            Term term = new Term("id", Integer.toString(id));
+            int count = TestIndexWriterReader.count(term, reader);
+            writer.deleteDocuments(term);
+            reader.close();
+            delCount += count;
+          }
+        }
+      } catch (Throwable ex) {
+        ex.printStackTrace(System.out);
+        this.ex = ex;
+        run = false;
+      }
+    }
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestNRTThreads.java b/lucene/backwards/src/test/org/apache/lucene/index/TestNRTThreads.java
new file mode 100644
index 0000000..5f489b7
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestNRTThreads.java
@@ -0,0 +1,547 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.PhraseQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.Sort;
+import org.apache.lucene.search.SortField;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.store.MockDirectoryWrapper;
+import org.apache.lucene.util.LineFileDocs;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util._TestUtil;
+import org.junit.Test;
+
+// TODO
+//   - mix in optimize, addIndexes
+//   - randomly mix in non-congruent docs
+
+public class TestNRTThreads extends LuceneTestCase {
+
+  private static class SubDocs {
+    public final String packID;
+    public final List<String> subIDs;
+    public boolean deleted;
+
+    public SubDocs(String packID, List<String> subIDs) {
+      this.packID = packID;
+      this.subIDs = subIDs;
+    }
+  }
+
+  @Test
+  public void testNRTThreads() throws Exception {
+
+    final long t0 = System.currentTimeMillis();
+
+    final LineFileDocs docs = new LineFileDocs(random);
+    final File tempDir = _TestUtil.getTempDir("nrtopenfiles");
+    final MockDirectoryWrapper dir = newFSDirectory(tempDir);
+    dir.setCheckIndexOnClose(false); // don't double-checkIndex, we do it ourselves.
+    final IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
+    conf.setMergedSegmentWarmer(new IndexWriter.IndexReaderWarmer() {
+      @Override
+      public void warm(IndexReader reader) throws IOException {
+        if (VERBOSE) {
+          System.out.println("TEST: now warm merged reader=" + reader);
+        }
+        final int maxDoc = reader.maxDoc();
+        int sum = 0;
+        final int inc = Math.max(1, maxDoc/50);
+        for(int docID=0;docID<maxDoc;docID += inc) {
+          if (!reader.isDeleted(docID)) {
+            final Document doc = reader.document(docID);
+            sum += doc.getFields().size();
+          }
+        }
+
+        IndexSearcher searcher = newSearcher(reader);
+        sum += searcher.search(new TermQuery(new Term("body", "united")), 10).totalHits;
+        searcher.close();
+
+        if (VERBOSE) {
+          System.out.println("TEST: warm visited " + sum + " fields");
+        }
+      }
+      });
+    
+    final IndexWriter writer = new IndexWriter(dir, conf);
+    if (VERBOSE) {
+      writer.setInfoStream(System.out);
+    }
+    _TestUtil.reduceOpenFiles(writer);
+
+    final int NUM_INDEX_THREADS = 2;
+    final int NUM_SEARCH_THREADS = 3;
+
+    final int RUN_TIME_SEC = LuceneTestCase.TEST_NIGHTLY ? 300 : RANDOM_MULTIPLIER;
+
+    final AtomicBoolean failed = new AtomicBoolean();
+    final AtomicInteger addCount = new AtomicInteger();
+    final AtomicInteger delCount = new AtomicInteger();
+    final AtomicInteger packCount = new AtomicInteger();
+
+    final Set<String> delIDs = Collections.synchronizedSet(new HashSet<String>());
+    final List<SubDocs> allSubDocs = Collections.synchronizedList(new ArrayList<SubDocs>());
+
+    final long stopTime = System.currentTimeMillis() + RUN_TIME_SEC*1000;
+    Thread[] threads = new Thread[NUM_INDEX_THREADS];
+    for(int thread=0;thread<NUM_INDEX_THREADS;thread++) {
+      threads[thread] = new Thread() {
+          @Override
+          public void run() {
+            // TODO: would be better if this were cross-thread, so that we make sure one thread deleting another thread's added docs works:
+            final List<String> toDeleteIDs = new ArrayList<String>();
+            final List<SubDocs> toDeleteSubDocs = new ArrayList<SubDocs>();
+            while(System.currentTimeMillis() < stopTime && !failed.get()) {
+              try {
+                Document doc = docs.nextDoc();
+                if (doc == null) {
+                  break;
+                }
+                final String addedField;
+                if (random.nextBoolean()) {
+                  addedField = "extra" + random.nextInt(10);
+                  doc.add(new Field(addedField, "a random field", Field.Store.NO, Field.Index.ANALYZED));
+                } else {
+                  addedField = null;
+                }
+                if (random.nextBoolean()) {
+                  if (VERBOSE) {
+                    System.out.println(Thread.currentThread().getName() + ": add doc id:" + doc.get("docid"));
+                  }
+
+                  if (random.nextBoolean()) {
+                    // Add a pack of adjacent sub-docs
+                    final String packID;
+                    final SubDocs delSubDocs;
+                    if (toDeleteSubDocs.size() > 0 && random.nextBoolean()) {
+                      delSubDocs = toDeleteSubDocs.get(random.nextInt(toDeleteSubDocs.size()));
+                      assert !delSubDocs.deleted;
+                      toDeleteSubDocs.remove(delSubDocs);
+                      // reuse prior packID
+                      packID = delSubDocs.packID;
+                    } else {
+                      delSubDocs = null;
+                      // make new packID
+                      packID = packCount.getAndIncrement() + "";
+                    }
+
+                    final Field packIDField = newField("packID", packID, Field.Store.YES, Field.Index.NOT_ANALYZED);
+                    final List<String> docIDs = new ArrayList<String>();
+                    final SubDocs subDocs = new SubDocs(packID, docIDs);
+                    final List<Document> docsList = new ArrayList<Document>();
+
+                    allSubDocs.add(subDocs);
+                    doc.add(packIDField);
+                    docsList.add(_TestUtil.cloneDocument(doc));
+                    docIDs.add(doc.get("docid"));
+
+                    final int maxDocCount = _TestUtil.nextInt(random, 1, 10);
+                    while(docsList.size() < maxDocCount) {
+                      doc = docs.nextDoc();
+                      if (doc == null) {
+                        break;
+                      }
+                      docsList.add(_TestUtil.cloneDocument(doc));
+                      docIDs.add(doc.get("docid"));
+                    }
+                    addCount.addAndGet(docsList.size());
+
+                    if (delSubDocs != null) {
+                      delSubDocs.deleted = true;
+                      delIDs.addAll(delSubDocs.subIDs);
+                      delCount.addAndGet(delSubDocs.subIDs.size());
+                      if (VERBOSE) {
+                        System.out.println("TEST: update pack packID=" + delSubDocs.packID + " count=" + docsList.size() + " docs=" + docIDs);
+                      }
+                      writer.updateDocuments(new Term("packID", delSubDocs.packID), docsList);
+                      /*
+                      // non-atomic:
+                      writer.deleteDocuments(new Term("packID", delSubDocs.packID));
+                      for(Document subDoc : docsList) {
+                        writer.addDocument(subDoc);
+                      }
+                      */
+                    } else {
+                      if (VERBOSE) {
+                        System.out.println("TEST: add pack packID=" + packID + " count=" + docsList.size() + " docs=" + docIDs);
+                      }
+                      writer.addDocuments(docsList);
+                      
+                      /*
+                      // non-atomic:
+                      for(Document subDoc : docsList) {
+                        writer.addDocument(subDoc);
+                      }
+                      */
+                    }
+                    doc.removeField("packID");
+
+                    if (random.nextInt(5) == 2) {
+                      if (VERBOSE) {
+                        //System.out.println(Thread.currentThread().getName() + ": buffer del id:" + packID);
+                      }
+                      toDeleteSubDocs.add(subDocs);
+                    }
+
+                  } else {
+                    writer.addDocument(doc);
+                    addCount.getAndIncrement();
+
+                    if (random.nextInt(5) == 3) {
+                      if (VERBOSE) {
+                        //System.out.println(Thread.currentThread().getName() + ": buffer del id:" + doc.get("docid"));
+                      }
+                      toDeleteIDs.add(doc.get("docid"));
+                    }
+                  }
+                } else {
+                  // we use update but it never replaces a
+                  // prior doc
+                  if (VERBOSE) {
+                    System.out.println(Thread.currentThread().getName() + ": update doc id:" + doc.get("docid"));
+                  }
+                  writer.updateDocument(new Term("docid", doc.get("docid")), doc);
+                  addCount.getAndIncrement();
+
+                  if (random.nextInt(5) == 3) {
+                    if (VERBOSE) {
+                      //System.out.println(Thread.currentThread().getName() + ": buffer del id:" + doc.get("docid"));
+                    }
+                    toDeleteIDs.add(doc.get("docid"));
+                  }
+                }
+
+                if (random.nextInt(30) == 17) {
+                  if (VERBOSE) {
+                    System.out.println(Thread.currentThread().getName() + ": apply " + toDeleteIDs.size() + " deletes");
+                  }
+                  for(String id : toDeleteIDs) {
+                    if (VERBOSE) {
+                      System.out.println(Thread.currentThread().getName() + ": del term=id:" + id);
+                    }
+                    writer.deleteDocuments(new Term("docid", id));
+                  }
+                  final int count = delCount.addAndGet(toDeleteIDs.size());
+                  if (VERBOSE) {
+                    System.out.println(Thread.currentThread().getName() + ": tot " + count + " deletes");
+                  }
+                  delIDs.addAll(toDeleteIDs);
+                  toDeleteIDs.clear();
+
+                  for(SubDocs subDocs : toDeleteSubDocs) {
+                    assert !subDocs.deleted;
+                    writer.deleteDocuments(new Term("packID", subDocs.packID));
+                    subDocs.deleted = true;
+                    if (VERBOSE) {
+                      System.out.println("  del subs: " + subDocs.subIDs + " packID=" + subDocs.packID);
+                    }
+                    delIDs.addAll(subDocs.subIDs);
+                    delCount.addAndGet(subDocs.subIDs.size());
+                  }
+                  toDeleteSubDocs.clear();
+                }
+                if (addedField != null) {
+                  doc.removeField(addedField);
+                }
+              } catch (Throwable t) {
+                System.out.println(Thread.currentThread().getName() + ": hit exc");
+                t.printStackTrace();
+                failed.set(true);
+                throw new RuntimeException(t);
+              }
+            }
+            if (VERBOSE) {
+              System.out.println(Thread.currentThread().getName() + ": indexing done");
+            }
+          }
+        };
+      threads[thread].setDaemon(true);
+      threads[thread].start();
+    }
+
+    if (VERBOSE) {
+      System.out.println("TEST: DONE start indexing threads [" + (System.currentTimeMillis()-t0) + " ms]");
+    }
+
+    // let index build up a bit
+    Thread.sleep(100);
+
+    IndexReader r = IndexReader.open(writer, true);
+    boolean any = false;
+
+    // silly starting guess:
+    final AtomicInteger totTermCount = new AtomicInteger(100);
+
+    final ExecutorService es = Executors.newCachedThreadPool();
+
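+    // Main loop: randomly either reopen the NRT reader in place or close it,
+    // commit, and open a fresh one, tracking any files that are still open
+    // but already deleted along the way.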
+    while(System.currentTimeMillis() < stopTime && !failed.get()) {
+      if (random.nextBoolean()) {
+        if (VERBOSE) {
+          System.out.println("TEST: now reopen r=" + r);
+        }
+        final IndexReader r2 = r.reopen();
+        if (r != r2) {
+          r.close();
+          r = r2;
+        }
+      } else {
+        if (VERBOSE) {
+          System.out.println("TEST: now close reader=" + r);
+        }
+        r.close();
+        writer.commit();
+        final Set<String> openDeletedFiles = dir.getOpenDeletedFiles();
+        if (openDeletedFiles.size() > 0) {
+          System.out.println("OBD files: " + openDeletedFiles);
+        }
+        any |= openDeletedFiles.size() > 0;
+        //assertEquals("open but deleted: " + openDeletedFiles, 0, openDeletedFiles.size());
+        if (VERBOSE) {
+          System.out.println("TEST: now open");
+        }
+        r = IndexReader.open(writer, true);
+      }
+      if (VERBOSE) {
+        System.out.println("TEST: got new reader=" + r);
+      }
+      //System.out.println("numDocs=" + r.numDocs() + "
+      //openDelFileCount=" + dir.openDeleteFileCount());
+
+      smokeTestReader(r);
+
+      if (r.numDocs() > 0) {
+
+        final IndexSearcher s = new IndexSearcher(r, es);
+
+        // run search threads
+        final long searchStopTime = System.currentTimeMillis() + 500;
+        final Thread[] searchThreads = new Thread[NUM_SEARCH_THREADS];
+        final AtomicInteger totHits = new AtomicInteger();
+        for(int thread=0;thread<NUM_SEARCH_THREADS;thread++) {
+          searchThreads[thread] = new Thread() {
+              @Override
+              public void run() {
+                try {
+                  TermEnum termEnum = s.getIndexReader().terms(new Term("body", ""));
+                  int seenTermCount = 0;
+                  int shift;
+                  int trigger;
+                  if (totTermCount.get() < 10) {
+                    shift = 0;
+                    trigger = 1;
+                  } else {
+                    trigger = totTermCount.get()/10;
+                    shift = random.nextInt(trigger);
+                  }
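+                  // trigger is roughly a tenth of the last known term count, so
+                  // about ten terms get searched per full pass over the
+                  // enumeration; shift randomizes which terms are picked.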
+                  while(System.currentTimeMillis() < searchStopTime) {
+                    Term term = termEnum.term();
+                    if (term == null) {
+                      if (seenTermCount < 10) {
+                        break;
+                      }
+                      totTermCount.set(seenTermCount);
+                      seenTermCount = 0;
+                      trigger = totTermCount.get()/10;
+                      //System.out.println("trigger " + trigger);
+                      shift = random.nextInt(trigger);
+                      termEnum = s.getIndexReader().terms(new Term("body", ""));
+                      continue;
+                    }
+                    seenTermCount++;
+                    // search 10 terms
+                    if (trigger == 0) {
+                      trigger = 1;
+                    }
+                    if ((seenTermCount + shift) % trigger == 0) {
+                      //if (VERBOSE) {
+                      //System.out.println(Thread.currentThread().getName() + " now search body:" + term.utf8ToString());
+                      //}
+                      totHits.addAndGet(runQuery(s, new TermQuery(term)));
+                    }
+                    termEnum.next();
+                  }
+                  if (VERBOSE) {
+                    System.out.println(Thread.currentThread().getName() + ": search done");
+                  }
+                } catch (Throwable t) {
+                  System.out.println(Thread.currentThread().getName() + ": hit exc");
+                  failed.set(true);
+                  t.printStackTrace(System.out);
+                  throw new RuntimeException(t);
+                }
+              }
+            };
+          searchThreads[thread].setDaemon(true);
+          searchThreads[thread].start();
+        }
+
+        for(int thread=0;thread<NUM_SEARCH_THREADS;thread++) {
+          searchThreads[thread].join();
+        }
+
+        if (VERBOSE) {
+          System.out.println("TEST: DONE search: totHits=" + totHits);
+        }
+      } else {
+        Thread.sleep(100);
+      }
+    }
+
+    es.shutdown();
+    es.awaitTermination(1, TimeUnit.SECONDS);
+
+    if (VERBOSE) {
+      System.out.println("TEST: all searching done [" + (System.currentTimeMillis()-t0) + " ms]");
+    }
+
+    //System.out.println("numDocs=" + r.numDocs() + " openDelFileCount=" + dir.openDeleteFileCount());
+    r.close();
+    final Set<String> openDeletedFiles = dir.getOpenDeletedFiles();
+    if (openDeletedFiles.size() > 0) {
+      System.out.println("OBD files: " + openDeletedFiles);
+    }
+    any |= openDeletedFiles.size() > 0;
+
+    assertFalse("saw non-zero open-but-deleted count", any);
+    if (VERBOSE) {
+      System.out.println("TEST: now join");
+    }
+    for(int thread=0;thread<NUM_INDEX_THREADS;thread++) {
+      threads[thread].join();
+    }
+    if (VERBOSE) {
+      System.out.println("TEST: done join [" + (System.currentTimeMillis()-t0) + " ms]; addCount=" + addCount + " delCount=" + delCount);
+    }
+
+    final IndexReader r2 = writer.getReader();
+    final IndexSearcher s = newSearcher(r2);
+    boolean doFail = false;
+    for(String id : delIDs) {
+      final TopDocs hits = s.search(new TermQuery(new Term("docid", id)), 1);
+      if (hits.totalHits != 0) {
+        System.out.println("doc id=" + id + " is supposed to be deleted, but got docID=" + hits.scoreDocs[0].doc);
+        doFail = true;
+      }
+    }
+
+    // Make sure each group of sub-docs are still in docID order:
+    for(SubDocs subDocs : allSubDocs) {
+      if (!subDocs.deleted) {
+        // We sort by relevance but the scores should be identical so sort falls back to by docID:
+        TopDocs hits = s.search(new TermQuery(new Term("packID", subDocs.packID)), 20);
+        assertEquals(subDocs.subIDs.size(), hits.totalHits);
+        int lastDocID = -1;
+        int startDocID = -1;
+        for(ScoreDoc scoreDoc : hits.scoreDocs) {
+          final int docID = scoreDoc.doc;
+          if (lastDocID != -1) {
+            assertEquals(1+lastDocID, docID);
+          } else {
+            startDocID = docID;
+          }
+          lastDocID = docID;
+          final Document doc = s.doc(docID);
+          assertEquals(subDocs.packID, doc.get("packID"));
+        }
+
+        lastDocID = startDocID - 1;
+        for(String subID : subDocs.subIDs) {
+          hits = s.search(new TermQuery(new Term("docid", subID)), 1);
+          assertEquals(1, hits.totalHits);
+          final int docID = hits.scoreDocs[0].doc;
+          if (lastDocID != -1) {
+            assertEquals(1+lastDocID, docID);
+          }
+          lastDocID = docID;
+        }          
+      } else {
+        for(String subID : subDocs.subIDs) {
+          assertEquals(0, s.search(new TermQuery(new Term("docid", subID)), 1).totalHits);
+        }
+      }
+    }
+    
+    final int endID = Integer.parseInt(docs.nextDoc().get("docid"));
+    for(int id=0;id<endID;id++) {
+      String stringID = ""+id;
+      if (!delIDs.contains(stringID)) {
+        final TopDocs hits = s.search(new TermQuery(new Term("docid", stringID)), 1);
+        if (hits.totalHits != 1) {
+          System.out.println("doc id=" + stringID + " is not supposed to be deleted, but got hitCount=" + hits.totalHits);
+          doFail = true;
+        }
+      }
+    }
+    assertFalse(doFail);
+    
+    assertEquals("index=" + writer.segString() + " addCount=" + addCount + " delCount=" + delCount, addCount.get() - delCount.get(), r2.numDocs());
+    r2.close();
+
+    writer.commit();
+    assertEquals("index=" + writer.segString() + " addCount=" + addCount + " delCount=" + delCount, addCount.get() - delCount.get(), writer.numDocs());
+
+    assertFalse(writer.anyNonBulkMerges);
+    writer.close(false);
+    _TestUtil.checkIndex(dir);
+    s.close();
+    dir.close();
+    _TestUtil.rmDir(tempDir);
+    docs.close();
+    if (VERBOSE) {
+      System.out.println("TEST: done [" + (System.currentTimeMillis()-t0) + " ms]");
+    }
+  }
+
+  private int runQuery(IndexSearcher s, Query q) throws Exception {
+    s.search(q, 10);
+    return s.search(q, null, 10, new Sort(new SortField("title", SortField.STRING))).totalHits;
+  }
+
+  private void smokeTestReader(IndexReader r) throws Exception {
+    IndexSearcher s = newSearcher(r);
+    runQuery(s, new TermQuery(new Term("body", "united")));
+    runQuery(s, new TermQuery(new Term("titleTokenized", "states")));
+    PhraseQuery pq = new PhraseQuery();
+    pq.add(new Term("body", "united"));
+    pq.add(new Term("body", "states"));
+    runQuery(s, pq);
+    s.close();
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestNewestSegment.java b/lucene/backwards/src/test/org/apache/lucene/index/TestNewestSegment.java
new file mode 100644
index 0000000..5ddf929
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestNewestSegment.java
@@ -0,0 +1,33 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.analysis.MockAnalyzer;
+
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+
+public class TestNewestSegment extends LuceneTestCase {
+  public void testNewestSegment() throws Exception {
+    Directory directory = newDirectory();
+    IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    assertNull(writer.newestSegment());
+    writer.close();
+    directory.close();
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestNoDeletionPolicy.java b/lucene/backwards/src/test/org/apache/lucene/index/TestNoDeletionPolicy.java
new file mode 100644
index 0000000..59d30aa
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestNoDeletionPolicy.java
@@ -0,0 +1,87 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Constructor;
+import java.lang.reflect.Method;
+import java.lang.reflect.Modifier;
+import java.util.Arrays;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field.Index;
+import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+import org.junit.Test;
+
+public class TestNoDeletionPolicy extends LuceneTestCase {
+
+  @Test
+  public void testNoDeletionPolicy() throws Exception {
+    IndexDeletionPolicy idp = NoDeletionPolicy.INSTANCE;
+    idp.onInit(null);
+    idp.onCommit(null);
+  }
+
+  @Test
+  public void testFinalSingleton() throws Exception {
+    assertTrue(Modifier.isFinal(NoDeletionPolicy.class.getModifiers()));
+    Constructor<?>[] ctors = NoDeletionPolicy.class.getDeclaredConstructors();
+    assertEquals("expected 1 private ctor only: " + Arrays.toString(ctors), 1, ctors.length);
+    assertTrue("that 1 should be private: " + ctors[0], Modifier.isPrivate(ctors[0].getModifiers()));
+  }
+
+  @Test
+  public void testMethodsOverridden() throws Exception {
+    // Ensures that all methods of IndexDeletionPolicy are
+    // overridden/implemented. That's important to ensure that NoDeletionPolicy 
+    // overrides everything, so that no unexpected behavior/error occurs.
+    // NOTE: even though IndexDeletionPolicy is an interface today, and so all
+    // methods must be implemented by NoDeletionPolicy, this test is important
+    // in case one day IDP becomes an abstract class.
+    for (Method m : NoDeletionPolicy.class.getMethods()) {
+      // getDeclaredMethods() returns just those methods that are declared on
+      // NoDeletionPolicy. getMethods() returns those that are visible in that
+      // context, including ones from Object. So just filter out Object. If in
+      // the future IndexDeletionPolicy will become a class that extends a
+      // different class than Object, this will need to change.
+      if (m.getDeclaringClass() != Object.class) {
+        assertTrue(m + " is not overridden !", m.getDeclaringClass() == NoDeletionPolicy.class);
+      }
+    }
+  }
+
+  @Test
+  public void testAllCommitsRemain() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
+        TEST_VERSION_CURRENT, new MockAnalyzer(random))
+        .setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE));
+    for (int i = 0; i < 10; i++) {
+      Document doc = new Document();
+      doc.add(newField("c", "a" + i, Store.YES, Index.ANALYZED));
+      writer.addDocument(doc);
+      writer.commit();
+      assertEquals("wrong number of commits !", i + 1, IndexReader.listCommits(dir).size());
+    }
+    writer.close();
+    dir.close();
+  }
+  
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestNoMergePolicy.java b/lucene/backwards/src/test/org/apache/lucene/index/TestNoMergePolicy.java
new file mode 100644
index 0000000..e73b0b3
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestNoMergePolicy.java
@@ -0,0 +1,71 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Constructor;
+import java.lang.reflect.Method;
+import java.lang.reflect.Modifier;
+import java.util.Arrays;
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.junit.Test;
+
+public class TestNoMergePolicy extends LuceneTestCase {
+
+  @Test
+  public void testNoMergePolicy() throws Exception {
+    MergePolicy mp = NoMergePolicy.NO_COMPOUND_FILES;
+    assertNull(mp.findMerges(null));
+    assertNull(mp.findMergesForOptimize(null, 0, null));
+    assertNull(mp.findMergesToExpungeDeletes(null));
+    assertFalse(mp.useCompoundFile(null, null));
+    mp.close();
+  }
+
+  @Test
+  public void testCompoundFiles() throws Exception {
+    assertFalse(NoMergePolicy.NO_COMPOUND_FILES.useCompoundFile(null, null));
+    assertTrue(NoMergePolicy.COMPOUND_FILES.useCompoundFile(null, null));
+  }
+
+  @Test
+  public void testFinalSingleton() throws Exception {
+    assertTrue(Modifier.isFinal(NoMergePolicy.class.getModifiers()));
+    Constructor<?>[] ctors = NoMergePolicy.class.getDeclaredConstructors();
+    assertEquals("expected 1 private ctor only: " + Arrays.toString(ctors), 1, ctors.length);
+    assertTrue("that 1 should be private: " + ctors[0], Modifier.isPrivate(ctors[0].getModifiers()));
+  }
+
+  @Test
+  public void testMethodsOverridden() throws Exception {
+    // Ensures that all methods of MergePolicy are overridden. That's important
+    // to ensure that NoMergePolicy overrides everything, so that no unexpected
+    // behavior/error occurs
+    for (Method m : NoMergePolicy.class.getMethods()) {
+      // getDeclaredMethods() returns just those methods that are declared on
+      // NoMergePolicy. getMethods() returns those that are visible in that
+      // context, including ones from Object. So just filter out Object. If in
+      // the future MergePolicy will extend a different class than Object, this
+      // will need to change.
+      if (m.getDeclaringClass() != Object.class) {
+        assertTrue(m + " is not overridden !", m.getDeclaringClass() == NoMergePolicy.class);
+      }
+    }
+  }
+
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestNoMergeScheduler.java b/lucene/backwards/src/test/org/apache/lucene/index/TestNoMergeScheduler.java
new file mode 100644
index 0000000..78fb4dc
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestNoMergeScheduler.java
@@ -0,0 +1,62 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Constructor;
+import java.lang.reflect.Method;
+import java.lang.reflect.Modifier;
+import java.util.Arrays;
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.junit.Test;
+
+public class TestNoMergeScheduler extends LuceneTestCase {
+
+  @Test
+  public void testNoMergeScheduler() throws Exception {
+    MergeScheduler ms = NoMergeScheduler.INSTANCE;
+    ms.close();
+    ms.merge(null);
+  }
+
+  @Test
+  public void testFinalSingleton() throws Exception {
+    assertTrue(Modifier.isFinal(NoMergeScheduler.class.getModifiers()));
+    Constructor<?>[] ctors = NoMergeScheduler.class.getDeclaredConstructors();
+    assertEquals("expected 1 private ctor only: " + Arrays.toString(ctors), 1, ctors.length);
+    assertTrue("that 1 should be private: " + ctors[0], Modifier.isPrivate(ctors[0].getModifiers()));
+  }
+
+  @Test
+  public void testMethodsOverridden() throws Exception {
+    // Ensures that all methods of MergeScheduler are overridden. That's
+    // important to ensure that NoMergeScheduler overrides everything, so that
+    // no unexpected behavior/error occurs
+    for (Method m : NoMergeScheduler.class.getMethods()) {
+      // getDeclaredMethods() returns just those methods that are declared on
+      // NoMergeScheduler. getMethods() returns those that are visible in that
+      // context, including ones from Object. So just filter out Object. If in
+      // the future MergeScheduler will extend a different class than Object,
+      // this will need to change.
+      if (m.getDeclaringClass() != Object.class) {
+        assertTrue(m + " is not overridden !", m.getDeclaringClass() == NoMergeScheduler.class);
+      }
+    }
+  }
+
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestNorms.java b/lucene/backwards/src/test/org/apache/lucene/index/TestNorms.java
new file mode 100755
index 0000000..9d8f412
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestNorms.java
@@ -0,0 +1,280 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Random;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.Field.Index;
+import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.search.DefaultSimilarity;
+import org.apache.lucene.search.Similarity;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+
+/**
+ * Test that norms info is preserved during index life - including
+ * separate norms, addDocument, addIndexes, optimize.
+ */
+public class TestNorms extends LuceneTestCase {
+
+  private class SimilarityOne extends DefaultSimilarity {
+    @Override
+    public float computeNorm(String fieldName, FieldInvertState state) {
+      // Disable length norm
+      return state.getBoost();
+    }
+  }
+
+  private static final int NUM_FIELDS = 10;
+  
+  private Similarity similarityOne;
+  private Analyzer anlzr;
+  private int numDocNorms;
+  private ArrayList<Float> norms; 
+  private ArrayList<Float> modifiedNorms; 
+  private float lastNorm = 0;
+  private float normDelta = (float) 0.001;
+
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    similarityOne = new SimilarityOne();
+    anlzr = new MockAnalyzer(random);
+  }
+
+  /**
+   * Test that norm values are preserved as the index is maintained,
+   * including separate norms, merging indexes with separate norms,
+   * and optimize.
+   */
+  public void testNorms() throws IOException {
+    Directory dir1 = newDirectory();
+
+    norms = new ArrayList<Float>();
+    modifiedNorms = new ArrayList<Float>();
+
+    createIndex(random, dir1);
+    doTestNorms(random, dir1);
+
+    // test with a single index: index2
+    ArrayList<Float> norms1 = norms;
+    ArrayList<Float> modifiedNorms1 = modifiedNorms;
+    int numDocNorms1 = numDocNorms;
+
+    norms = new ArrayList<Float>();
+    modifiedNorms = new ArrayList<Float>();
+    numDocNorms = 0;
+    
+    Directory dir2 = newDirectory();
+
+    createIndex(random, dir2);
+    doTestNorms(random, dir2);
+
+    // add index1 and index2 to a third index: index3
+    Directory dir3 = newDirectory();
+
+    createIndex(random, dir3);
+    IndexWriter iw = new IndexWriter(dir3, newIndexWriterConfig(
+        TEST_VERSION_CURRENT, anlzr).setOpenMode(OpenMode.APPEND)
+                                     .setMaxBufferedDocs(5).setMergePolicy(newLogMergePolicy(3)));
+    iw.addIndexes(new Directory[]{dir1,dir2});
+    iw.optimize();
+    iw.close();
+    
+    norms1.addAll(norms);
+    norms = norms1;
+    modifiedNorms1.addAll(modifiedNorms);
+    modifiedNorms = modifiedNorms1;
+    numDocNorms += numDocNorms1;
+
+    // test with index3
+    verifyIndex(dir3);
+    doTestNorms(random, dir3);
+    
+    // now with optimize
+    iw = new IndexWriter(dir3, newIndexWriterConfig( TEST_VERSION_CURRENT,
+        anlzr).setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(5).setMergePolicy(newLogMergePolicy(3)));
+    iw.optimize();
+    iw.close();
+    verifyIndex(dir3);
+    
+    dir1.close();
+    dir2.close();
+    dir3.close();
+  }
+
+  private void doTestNorms(Random random, Directory dir) throws IOException {
+    int num = atLeast(1);
+    for (int i=0; i<num; i++) {
+      addDocs(random, dir,12,true);
+      verifyIndex(dir);
+      modifyNormsForF1(dir);
+      verifyIndex(dir);
+      addDocs(random, dir,12,false);
+      verifyIndex(dir);
+      modifyNormsForF1(dir);
+      verifyIndex(dir);
+    }
+  }
+
+  private void createIndex(Random random, Directory dir) throws IOException {
+    IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(
+        TEST_VERSION_CURRENT, anlzr).setOpenMode(OpenMode.CREATE)
+        .setMaxBufferedDocs(5).setSimilarity(similarityOne).setMergePolicy(newLogMergePolicy()));
+    LogMergePolicy lmp = (LogMergePolicy) iw.getConfig().getMergePolicy();
+    lmp.setMergeFactor(3);
+    lmp.setUseCompoundFile(true);
+    iw.close();
+  }
+
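+  // Modifies f1's norm for every third doc by swapping it with another doc's
+  // norm, updating 'modifiedNorms' to match; calling setNorm on a non-read-only
+  // reader is what exercises the separate norms mentioned in the class javadoc.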
+  private void modifyNormsForF1(Directory dir) throws IOException {
+    IndexReader ir = IndexReader.open(dir, false);
+    int n = ir.maxDoc();
+    for (int i = 0; i < n; i+=3) { // modify for every third doc
+      int k = (i*3) % modifiedNorms.size();
+      float origNorm = modifiedNorms.get(i).floatValue();
+      float newNorm = modifiedNorms.get(k).floatValue();
+      //System.out.println("Modifying: for "+i+" from "+origNorm+" to "+newNorm);
+      //System.out.println("      and: for "+k+" from "+newNorm+" to "+origNorm);
+      modifiedNorms.set(i, Float.valueOf(newNorm));
+      modifiedNorms.set(k, Float.valueOf(origNorm));
+      ir.setNorm(i, "f"+1, newNorm); 
+      ir.setNorm(k, "f"+1, origNorm); 
+    }
+    ir.close();
+  }
+
+
+  private void verifyIndex(Directory dir) throws IOException {
+    IndexReader ir = IndexReader.open(dir, false);
+    for (int i = 0; i < NUM_FIELDS; i++) {
+      String field = "f"+i;
+      byte b[] = ir.norms(field);
+      assertEquals("number of norms mismatches",numDocNorms,b.length);
+      ArrayList<Float> storedNorms = (i==1 ? modifiedNorms : norms);
+      for (int j = 0; j < b.length; j++) {
+        float norm = similarityOne.decodeNormValue(b[j]);
+        float norm1 = storedNorms.get(j).floatValue();
+        assertEquals("stored norm value of "+field+" for doc "+j+" is "+norm+" - a mismatch!", norm, norm1, 0.000001);
+      }
+    }
+    ir.close();
+  }
+
+  private void addDocs(Random random, Directory dir, int ndocs, boolean compound) throws IOException {
+    IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(
+        TEST_VERSION_CURRENT, anlzr).setOpenMode(OpenMode.APPEND)
+        .setMaxBufferedDocs(5).setSimilarity(similarityOne).setMergePolicy(newLogMergePolicy()));
+    LogMergePolicy lmp = (LogMergePolicy) iw.getConfig().getMergePolicy();
+    lmp.setMergeFactor(3);
+    lmp.setUseCompoundFile(compound);
+    for (int i = 0; i < ndocs; i++) {
+      iw.addDocument(newDoc());
+    }
+    iw.close();
+  }
+
+  // create the next document
+  private Document newDoc() {
+    Document d = new Document();
+    float boost = nextNorm();
+    for (int i = 0; i < 10; i++) {
+      Field f = newField("f"+i,"v"+i,Store.NO,Index.NOT_ANALYZED);
+      f.setBoost(boost);
+      d.add(f);
+    }
+    return d;
+  }
+
+  // return unique norm values that are unchanged by encoding/decoding
+  private float nextNorm() {
+    float norm = lastNorm + normDelta;
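+    // Keep advancing until the encode/decode round trip yields a value strictly
+    // greater than lastNorm, so every stored norm survives single-byte
+    // quantization as a distinct value.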
+    do {
+      float norm1 = similarityOne.decodeNormValue(similarityOne.encodeNormValue(norm));
+      if (norm1 > lastNorm) {
+        //System.out.println(norm1+" > "+lastNorm);
+        norm = norm1;
+        break;
+      }
+      norm += normDelta;
+    } while (true);
+    norms.add(numDocNorms, Float.valueOf(norm));
+    modifiedNorms.add(numDocNorms, Float.valueOf(norm));
+    //System.out.println("creating norm("+numDocNorms+"): "+norm);
+    numDocNorms ++;
+    lastNorm = (norm>10 ? 0 : norm); // there's a limit to how many distinct values can be stored in a single byte
+    return norm;
+  }
+  
+  class CustomNormEncodingSimilarity extends DefaultSimilarity {
+    @Override
+    public byte encodeNormValue(float f) {
+      return (byte) f;
+    }
+    
+    @Override
+    public float decodeNormValue(byte b) {
+      return (float) b;
+    }
+
+    @Override
+    public float computeNorm(String field, FieldInvertState state) {
+      return (float) state.getLength();
+    }
+  }
+  
+  // LUCENE-1260
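+  // "foo" is always empty while "bar" always holds a single token, and the
+  // custom similarity stores the raw field length as the norm byte, so the
+  // norms read back below are expected to be 0 and 1 respectively.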
+  public void testCustomEncoder() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriterConfig config = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
+    config.setSimilarity(new CustomNormEncodingSimilarity());
+    RandomIndexWriter writer = new RandomIndexWriter(random, dir, config);
+    Document doc = new Document();
+    Field foo = newField("foo", "", Field.Store.NO, Field.Index.ANALYZED);
+    Field bar = newField("bar", "", Field.Store.NO, Field.Index.ANALYZED);
+    doc.add(foo);
+    doc.add(bar);
+    
+    for (int i = 0; i < 100; i++) {
+      bar.setValue("singleton");
+      writer.addDocument(doc);
+    }
+    
+    IndexReader reader = writer.getReader();
+    writer.close();
+    
+    byte fooNorms[] = reader.norms("foo");
+    for (int i = 0; i < reader.maxDoc(); i++)
+      assertEquals(0, fooNorms[i]);
+    
+    byte barNorms[] = reader.norms("bar");
+    for (int i = 0; i < reader.maxDoc(); i++)
+      assertEquals(1, barNorms[i]);
+    
+    reader.close();
+    dir.close();
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestOmitPositions.java b/lucene/backwards/src/test/org/apache/lucene/index/TestOmitPositions.java
new file mode 100644
index 0000000..5e96319
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestOmitPositions.java
@@ -0,0 +1,237 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.FieldInfo.IndexOptions;
+import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.LuceneTestCase;
+
+/**
+ * 
+ * @lucene.experimental
+ */
+public class TestOmitPositions extends LuceneTestCase {
+
+  public void testBasic() throws Exception {   
+    Directory dir = newDirectory();
+    RandomIndexWriter w = new RandomIndexWriter(random, dir);
+    Document doc = new Document();
+    Field f = newField("foo", "this is a test test", Field.Index.ANALYZED);
+    f.setIndexOptions(IndexOptions.DOCS_AND_FREQS);
+    doc.add(f);
+    for (int i = 0; i < 100; i++) {
+      w.addDocument(doc);
+    }
+    
+    IndexReader reader = w.getReader();
+    w.close();
+    
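+    // The field was indexed with DOCS_AND_FREQS, so freq survives (2 per doc)
+    // but positions were never written; nextPosition() is expected to just
+    // return 0 here, which is what the assertions below check.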
+    TermPositions tp = reader.termPositions(new Term("foo", "test"));
+    while (tp.next()) {
+      assertEquals(2, tp.freq());
+      assertEquals(0, tp.nextPosition());
+      assertEquals(0, tp.nextPosition());
+    }
+    
+    TermDocs te = reader.termDocs(new Term("foo", "test"));
+    while (te.next()) {
+      assertEquals(2, te.freq());
+    }
+    
+    reader.close();
+    dir.close();
+  }
+  
+  // Tests whether the DocumentWriter correctly enables the
+  // omitTermFreqAndPositions bit in the FieldInfo
+  public void testPositions() throws Exception {
+    Directory ram = newDirectory();
+    Analyzer analyzer = new MockAnalyzer(random);
+    IndexWriter writer = new IndexWriter(ram, newIndexWriterConfig( TEST_VERSION_CURRENT, analyzer));
+    Document d = new Document();
+        
+    // f1,f2,f3: docs only
+    Field f1 = newField("f1", "This field has docs only", Field.Store.NO, Field.Index.ANALYZED);
+    f1.setIndexOptions(IndexOptions.DOCS_ONLY);
+    d.add(f1);
+       
+    Field f2 = newField("f2", "This field has docs only", Field.Store.NO, Field.Index.ANALYZED);
+    f2.setIndexOptions(IndexOptions.DOCS_ONLY);
+    d.add(f2);
+    
+    Field f3 = newField("f3", "This field has docs only", Field.Store.NO, Field.Index.ANALYZED);
+    f3.setIndexOptions(IndexOptions.DOCS_ONLY);
+    d.add(f3);
+    
+    // f4,f5,f6 docs and freqs
+    Field f4 = newField("f4", "This field has docs and freqs", Field.Store.NO, Field.Index.ANALYZED);
+    f4.setIndexOptions(IndexOptions.DOCS_AND_FREQS);
+    d.add(f4);
+       
+    Field f5 = newField("f5", "This field has docs and freqs", Field.Store.NO, Field.Index.ANALYZED);
+    f5.setIndexOptions(IndexOptions.DOCS_AND_FREQS);
+    d.add(f5);
+    
+    Field f6 = newField("f6", "This field has docs and freqs", Field.Store.NO, Field.Index.ANALYZED);
+    f6.setIndexOptions(IndexOptions.DOCS_AND_FREQS);
+    d.add(f6);
+    
+    // f7,f8,f9 docs/freqs/positions
+    Field f7 = newField("f7", "This field has docs and freqs and positions", Field.Store.NO, Field.Index.ANALYZED);
+    f7.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
+    d.add(f7);
+       
+    Field f8 = newField("f8", "This field has docs and freqs and positions", Field.Store.NO, Field.Index.ANALYZED);
+    f8.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
+    d.add(f8);
+    
+    Field f9 = newField("f9", "This field has docs and freqs and positions", Field.Store.NO, Field.Index.ANALYZED);
+    f9.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
+    d.add(f9);
+        
+    writer.addDocument(d);
+    writer.optimize();
+
+    // now we add another document which has docs-only for f1, f4, f7, docs/freqs for f2, f5, f8, 
+    // and docs/freqs/positions for f3, f6, f9
+    d = new Document();
+    
+    // f1,f4,f7: docs only
+    f1 = newField("f1", "This field has docs only", Field.Store.NO, Field.Index.ANALYZED);
+    f1.setIndexOptions(IndexOptions.DOCS_ONLY);
+    d.add(f1);
+    
+    f4 = newField("f4", "This field has docs only", Field.Store.NO, Field.Index.ANALYZED);
+    f4.setIndexOptions(IndexOptions.DOCS_ONLY);
+    d.add(f4);
+    
+    f7 = newField("f7", "This field has docs only", Field.Store.NO, Field.Index.ANALYZED);
+    f7.setIndexOptions(IndexOptions.DOCS_ONLY);
+    d.add(f7);
+
+    // f2, f5, f8: docs and freqs
+    f2 = newField("f2", "This field has docs and freqs", Field.Store.NO, Field.Index.ANALYZED);
+    f2.setIndexOptions(IndexOptions.DOCS_AND_FREQS);
+    d.add(f2);
+    
+    f5 = newField("f5", "This field has docs and freqs", Field.Store.NO, Field.Index.ANALYZED);
+    f5.setIndexOptions(IndexOptions.DOCS_AND_FREQS);
+    d.add(f5);
+    
+    f8 = newField("f8", "This field has docs and freqs", Field.Store.NO, Field.Index.ANALYZED);
+    f8.setIndexOptions(IndexOptions.DOCS_AND_FREQS);
+    d.add(f8);
+    
+    // f3, f6, f9: docs and freqs and positions
+    f3 = newField("f3", "This field has docs and freqs and positions", Field.Store.NO, Field.Index.ANALYZED);
+    f3.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
+    d.add(f3);     
+    
+    f6 = newField("f6", "This field has docs and freqs and positions", Field.Store.NO, Field.Index.ANALYZED);
+    f6.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
+    d.add(f6);
+    
+    f9 = newField("f9", "This field has docs and freqs and positions", Field.Store.NO, Field.Index.ANALYZED);
+    f9.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
+    d.add(f9);
+        
+    writer.addDocument(d);
+
+    // force merge
+    writer.optimize();
+    // flush
+    writer.close();
+
+    SegmentReader reader = SegmentReader.getOnlySegmentReader(ram);
+    FieldInfos fi = reader.fieldInfos();
+    // docs + docs = docs
+    assertEquals(IndexOptions.DOCS_ONLY, fi.fieldInfo("f1").indexOptions);
+    // docs + docs/freqs = docs
+    assertEquals(IndexOptions.DOCS_ONLY, fi.fieldInfo("f2").indexOptions);
+    // docs + docs/freqs/pos = docs
+    assertEquals(IndexOptions.DOCS_ONLY, fi.fieldInfo("f3").indexOptions);
+    // docs/freqs + docs = docs
+    assertEquals(IndexOptions.DOCS_ONLY, fi.fieldInfo("f4").indexOptions);
+    // docs/freqs + docs/freqs = docs/freqs
+    assertEquals(IndexOptions.DOCS_AND_FREQS, fi.fieldInfo("f5").indexOptions);
+    // docs/freqs + docs/freqs/pos = docs/freqs
+    assertEquals(IndexOptions.DOCS_AND_FREQS, fi.fieldInfo("f6").indexOptions);
+    // docs/freqs/pos + docs = docs
+    assertEquals(IndexOptions.DOCS_ONLY, fi.fieldInfo("f7").indexOptions);
+    // docs/freqs/pos + docs/freqs = docs/freqs
+    assertEquals(IndexOptions.DOCS_AND_FREQS, fi.fieldInfo("f8").indexOptions);
+    // docs/freqs/pos + docs/freqs/pos = docs/freqs/pos
+    assertEquals(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, fi.fieldInfo("f9").indexOptions);
+    
+    reader.close();
+    ram.close();
+  }
+  
+  private void assertNoPrx(Directory dir) throws Throwable {
+    final String[] files = dir.listAll();
+    for(int i=0;i<files.length;i++) {
+      assertFalse(files[i].endsWith(".prx"));
+      assertFalse(files[i].endsWith(".pos"));
+    }
+  }
+
+  // Verifies no *.prx exists when all fields omit term positions:
+  public void testNoPrxFile() throws Throwable {
+    Directory ram = newDirectory();
+    Analyzer analyzer = new MockAnalyzer(random);
+    IndexWriter writer = new IndexWriter(ram, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer)
+        .setMaxBufferedDocs(3)
+        .setMergePolicy(newLogMergePolicy()));
+    LogMergePolicy lmp = (LogMergePolicy) writer.getConfig().getMergePolicy();
+    lmp.setMergeFactor(2);
+    lmp.setUseCompoundFile(false);
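+    // compound files are disabled so any raw .prx/.pos files are visible in the
+    // directory listing checked by assertNoPrx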
+    Document d = new Document();
+        
+    Field f1 = newField("f1", "This field has term freqs", Field.Store.NO, Field.Index.ANALYZED);
+    f1.setIndexOptions(IndexOptions.DOCS_AND_FREQS);
+    d.add(f1);
+
+    for(int i=0;i<30;i++)
+      writer.addDocument(d);
+
+    writer.commit();
+
+    assertNoPrx(ram);
+    
+    // now add some documents with positions, and check there is no prox after optimization
+    d = new Document();
+    f1 = newField("f1", "This field has positions", Field.Store.NO, Field.Index.ANALYZED);
+    d.add(f1);
+    
+    for(int i=0;i<30;i++)
+      writer.addDocument(d);
+
+    // force merge
+    writer.optimize();
+    // flush
+    writer.close();
+
+    assertNoPrx(ram);
+    ram.close();
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestOmitTf.java b/lucene/backwards/src/test/org/apache/lucene/index/TestOmitTf.java
new file mode 100644
index 0000000..bf24846
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestOmitTf.java
@@ -0,0 +1,421 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.Collection;
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util._TestUtil;
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.FieldInfo.IndexOptions;
+import org.apache.lucene.search.*;
+import org.apache.lucene.search.BooleanClause.Occur;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.search.Explanation.IDFExplanation;
+
+
+public class TestOmitTf extends LuceneTestCase {
+  
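+  // A deliberately neutral Similarity: norms, idf, coord and queryNorm are all
+  // constant, so a hit's score reduces to its raw term frequency (or 1.0 when
+  // term freqs are omitted).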
+  public static class SimpleSimilarity extends Similarity {
+    @Override public float computeNorm(String field, FieldInvertState state) { return state.getBoost(); }
+    @Override public float queryNorm(float sumOfSquaredWeights) { return 1.0f; }
+    @Override public float tf(float freq) { return freq; }
+    @Override public float sloppyFreq(int distance) { return 2.0f; }
+    @Override public float idf(int docFreq, int numDocs) { return 1.0f; }
+    @Override public float coord(int overlap, int maxOverlap) { return 1.0f; }
+    @Override public IDFExplanation idfExplain(Collection<Term> terms, Searcher searcher) throws IOException {
+      return new IDFExplanation() {
+        @Override
+        public float getIdf() {
+          return 1.0f;
+        }
+        @Override
+        public String explain() {
+          return "Inexplicable";
+        }
+      };
+    }
+  }
+
+  // Tests whether the DocumentWriter correctly enables the
+  // omitTermFreqAndPositions bit in the FieldInfo
+  public void testOmitTermFreqAndPositions() throws Exception {
+    Directory ram = newDirectory();
+    Analyzer analyzer = new MockAnalyzer(random);
+    IndexWriter writer = new IndexWriter(ram, newIndexWriterConfig( TEST_VERSION_CURRENT, analyzer));
+    Document d = new Document();
+        
+    // this field will have Tf
+    Field f1 = newField("f1", "This field has term freqs", Field.Store.NO, Field.Index.ANALYZED);
+    d.add(f1);
+       
+    // this field will NOT have Tf
+    Field f2 = newField("f2", "This field has NO Tf in all docs", Field.Store.NO, Field.Index.ANALYZED);
+    f2.setIndexOptions(IndexOptions.DOCS_ONLY);
+    d.add(f2);
+        
+    writer.addDocument(d);
+    writer.optimize();
+    // now we add another document which has term freqs for field f2 and not for f1, and verify that the SegmentMerger
+    // keeps things consistent
+    d = new Document();
+        
+    // Reverse
+    f1.setIndexOptions(IndexOptions.DOCS_ONLY);
+    d.add(f1);
+        
+    f2.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);        
+    d.add(f2);
+        
+    writer.addDocument(d);
+    // force merge
+    writer.optimize();
+    // flush
+    writer.close();
+
+    SegmentReader reader = SegmentReader.getOnlySegmentReader(ram);
+    FieldInfos fi = reader.fieldInfos();
+    assertEquals("OmitTermFreqAndPositions field bit should be set.", IndexOptions.DOCS_ONLY, fi.fieldInfo("f1").indexOptions);
+    assertEquals("OmitTermFreqAndPositions field bit should be set.", IndexOptions.DOCS_ONLY, fi.fieldInfo("f2").indexOptions);
+        
+    reader.close();
+    ram.close();
+  }
+ 
+  // Tests whether merging of docs that have different
+  // omitTermFreqAndPositions for the same field works
+  public void testMixedMerge() throws Exception {
+    Directory ram = newDirectory();
+    Analyzer analyzer = new MockAnalyzer(random);
+    IndexWriter writer = new IndexWriter(
+        ram,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer).
+            setMaxBufferedDocs(3).
+            setMergePolicy(newLogMergePolicy(2))
+    );
+    writer.setInfoStream(VERBOSE ? System.out : null);
+    Document d = new Document();
+        
+    // this field will have Tf
+    Field f1 = newField("f1", "This field has term freqs", Field.Store.NO, Field.Index.ANALYZED);
+    d.add(f1);
+       
+    // this field will NOT have Tf
+    Field f2 = newField("f2", "This field has NO Tf in all docs", Field.Store.NO, Field.Index.ANALYZED);
+    f2.setIndexOptions(IndexOptions.DOCS_ONLY);
+    d.add(f2);
+
+    for(int i=0;i<30;i++)
+      writer.addDocument(d);
+        
+    // now we add another document which has term freqs for field f2 and not for f1, and verify that the SegmentMerger
+    // keeps things consistent
+    d = new Document();
+        
+    // Reverse
+    f1.setIndexOptions(IndexOptions.DOCS_ONLY);
+    d.add(f1);
+        
+    f2.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);        
+    d.add(f2);
+        
+    for(int i=0;i<30;i++)
+      writer.addDocument(d);
+        
+    // force merge
+    writer.optimize();
+    // flush
+    writer.close();
+
+    SegmentReader reader = SegmentReader.getOnlySegmentReader(ram);
+    FieldInfos fi = reader.fieldInfos();
+    assertEquals("OmitTermFreqAndPositions field bit should be set.", IndexOptions.DOCS_ONLY, fi.fieldInfo("f1").indexOptions);
+    assertEquals("OmitTermFreqAndPositions field bit should be set.", IndexOptions.DOCS_ONLY, fi.fieldInfo("f2").indexOptions);
+        
+    reader.close();
+    ram.close();
+  }
+
+  // Make sure that first adding docs that do not omitTermFreqAndPositions for
+  // field X, then adding docs that do omitTermFreqAndPositions for that same
+  // field, still results in that field being marked as omitting term freqs and positions
+  public void testMixedRAM() throws Exception {
+    Directory ram = newDirectory();
+    Analyzer analyzer = new MockAnalyzer(random);
+    IndexWriter writer = new IndexWriter(
+        ram,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer).
+            setMaxBufferedDocs(10).
+            setMergePolicy(newLogMergePolicy(2))
+    );
+    Document d = new Document();
+        
+    // this field will have Tf
+    Field f1 = newField("f1", "This field has term freqs", Field.Store.NO, Field.Index.ANALYZED);
+    d.add(f1);
+       
+    // this field will NOT have Tf
+    Field f2 = newField("f2", "This field has NO Tf in all docs", Field.Store.NO, Field.Index.ANALYZED);
+    d.add(f2);
+
+    for(int i=0;i<5;i++)
+      writer.addDocument(d);
+
+    f2.setIndexOptions(IndexOptions.DOCS_ONLY);
+        
+    for(int i=0;i<20;i++)
+      writer.addDocument(d);
+
+    // force merge
+    writer.optimize();
+
+    // flush
+    writer.close();
+
+    SegmentReader reader = SegmentReader.getOnlySegmentReader(ram);
+    FieldInfos fi = reader.fieldInfos();
+    assertEquals("OmitTermFreqAndPositions field bit should not be set.", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, fi.fieldInfo("f1").indexOptions);
+    assertEquals("OmitTermFreqAndPositions field bit should be set.", IndexOptions.DOCS_ONLY, fi.fieldInfo("f2").indexOptions);
+        
+    reader.close();
+    ram.close();
+  }
+
+  private void assertNoPrx(Directory dir) throws Throwable {
+    final String[] files = dir.listAll();
+    for(int i=0;i<files.length;i++) {
+      assertFalse(files[i].endsWith(".prx"));
+      assertFalse(files[i].endsWith(".pos"));
+    }
+  }
+
+  // Verifies no *.prx exists when all fields omit term freq:
+  public void testNoPrxFile() throws Throwable {
+    Directory ram = newDirectory();
+    Analyzer analyzer = new MockAnalyzer(random);
+    IndexWriter writer = new IndexWriter(ram, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer)
+        .setMaxBufferedDocs(3)
+        .setMergePolicy(newLogMergePolicy()));
+    LogMergePolicy lmp = (LogMergePolicy) writer.getConfig().getMergePolicy();
+    lmp.setMergeFactor(2);
+    lmp.setUseCompoundFile(false);
+    Document d = new Document();
+        
+    Field f1 = newField("f1", "This field has no term freqs", Field.Store.NO, Field.Index.ANALYZED);
+    f1.setIndexOptions(IndexOptions.DOCS_ONLY);
+    d.add(f1);
+
+    for(int i=0;i<30;i++)
+      writer.addDocument(d);
+
+    writer.commit();
+
+    assertNoPrx(ram);
+    
+    // now add some documents with positions, and check there is no prox after optimization
+    d = new Document();
+    f1 = newField("f1", "This field has positions", Field.Store.NO, Field.Index.ANALYZED);
+    d.add(f1);
+    
+    for(int i=0;i<30;i++)
+      writer.addDocument(d);
+ 
+    // force merge
+    writer.optimize();
+    // flush
+    writer.close();
+
+    assertNoPrx(ram);
+    ram.close();
+  }
+ 
+  // Test scores with one field with Term Freqs and one without, otherwise with equal content 
+  public void testBasic() throws Exception {
+    Directory dir = newDirectory();  
+    Analyzer analyzer = new MockAnalyzer(random);
+    IndexWriter writer = new IndexWriter(
+        dir,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer).
+            setMaxBufferedDocs(2).
+            setSimilarity(new SimpleSimilarity()).
+            setMergePolicy(newLogMergePolicy(2))
+    );
+    writer.setInfoStream(VERBOSE ? System.out : null);
+        
+    StringBuilder sb = new StringBuilder(265);
+    String term = "term";
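+    // document i contains "term" (i+1) times, so with SimpleSimilarity the "tf"
+    // field scores i+1 while the DOCS_ONLY "noTf" field always scores 1.0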
+    for(int i = 0; i<30; i++){
+      Document d = new Document();
+      sb.append(term).append(" ");
+      String content  = sb.toString();
+      Field noTf = newField("noTf", content + (i%2==0 ? "" : " notf"), Field.Store.NO, Field.Index.ANALYZED);
+      noTf.setIndexOptions(IndexOptions.DOCS_ONLY);
+      d.add(noTf);
+          
+      Field tf = newField("tf", content + (i%2==0 ? " tf" : ""), Field.Store.NO, Field.Index.ANALYZED);
+      d.add(tf);
+          
+      writer.addDocument(d);
+      //System.out.println(d);
+    }
+        
+    writer.optimize();
+    // flush
+    writer.close();
+
+    /*
+     * Verify the index
+     */         
+    Searcher searcher = new IndexSearcher(dir, true);
+    searcher.setSimilarity(new SimpleSimilarity());
+        
+    Term a = new Term("noTf", term);
+    Term b = new Term("tf", term);
+    Term c = new Term("noTf", "notf");
+    Term d = new Term("tf", "tf");
+    TermQuery q1 = new TermQuery(a);
+    TermQuery q2 = new TermQuery(b);
+    TermQuery q3 = new TermQuery(c);
+    TermQuery q4 = new TermQuery(d);
+
+        
+    searcher.search(q1,
+                    new CountingHitCollector() {
+                      private Scorer scorer;
+                      @Override
+                      public final void setScorer(Scorer scorer) {
+                        this.scorer = scorer;
+                      }
+                      @Override
+                      public final void collect(int doc) throws IOException {
+                        //System.out.println("Q1: Doc=" + doc + " score=" + score);
+                        float score = scorer.score();
+                        assertTrue(score==1.0f);
+                        super.collect(doc);
+                      }
+                    });
+    //System.out.println(CountingHitCollector.getCount());
+        
+        
+    searcher.search(q2,
+                    new CountingHitCollector() {
+                      private Scorer scorer;
+                      @Override
+                      public final void setScorer(Scorer scorer) {
+                        this.scorer = scorer;
+                      }
+                      @Override
+                      public final void collect(int doc) throws IOException {
+                        //System.out.println("Q2: Doc=" + doc + " score=" + score);
+                        float score = scorer.score();
+                        assertEquals(1.0f+doc, score, 0.00001f);
+                        super.collect(doc);
+                      }
+                    });
+    //System.out.println(CountingHitCollector.getCount());
+         
+        
+        
+        
+        
+    searcher.search(q3,
+                    new CountingHitCollector() {
+                      private Scorer scorer;
+                      @Override
+                      public final void setScorer(Scorer scorer) {
+                        this.scorer = scorer;
+                      }
+                      @Override
+                      public final void collect(int doc) throws IOException {
+                        //System.out.println("Q1: Doc=" + doc + " score=" + score);
+                        float score = scorer.score();
+                        assertTrue(score==1.0f);
+                        assertFalse(doc%2==0);
+                        super.collect(doc);
+                      }
+                    });
+    //System.out.println(CountingHitCollector.getCount());
+        
+        
+    searcher.search(q4,
+                    new CountingHitCollector() {
+                      private Scorer scorer;
+                      @Override
+                      public final void setScorer(Scorer scorer) {
+                        this.scorer = scorer;
+                      }
+                      @Override
+                      public final void collect(int doc) throws IOException {
+                        float score = scorer.score();
+                        //System.out.println("Q1: Doc=" + doc + " score=" + score);
+                        assertTrue(score==1.0f);
+                        assertTrue(doc%2==0);
+                        super.collect(doc);
+                      }
+                    });
+    //System.out.println(CountingHitCollector.getCount());
+        
+        
+        
+    BooleanQuery bq = new BooleanQuery();
+    bq.add(q1,Occur.MUST);
+    bq.add(q4,Occur.MUST);
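+    // q1 (noTf:term) matches all 30 docs while q4 (tf:tf) matches only the 15
+    // even-numbered ones, so the conjunction should produce exactly 15 hits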
+        
+    searcher.search(bq,
+                    new CountingHitCollector() {
+                      @Override
+                      public final void collect(int doc) throws IOException {
+                        //System.out.println("BQ: Doc=" + doc + " score=" + score);
+                        super.collect(doc);
+                      }
+                    });
+    assertTrue(15 == CountingHitCollector.getCount());
+        
+    searcher.close();     
+    dir.close();
+  }
+     
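+  // Simple hit counter; the static counters are reset in the constructor, so
+  // getCount() reflects only the most recently constructed collector.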
+  public static class CountingHitCollector extends Collector {
+    static int count=0;
+    static int sum=0;
+    private int docBase = -1;
+    CountingHitCollector(){count=0;sum=0;}
+    @Override
+    public void setScorer(Scorer scorer) throws IOException {}
+    @Override
+    public void collect(int doc) throws IOException {
+      count++;
+      sum += doc + docBase;  // use it to avoid any possibility of being optimized away
+    }
+
+    public static int getCount() { return count; }
+    public static int getSum() { return sum; }
+    
+    @Override
+    public void setNextReader(IndexReader reader, int docBase) {
+      this.docBase = docBase;
+    }
+    @Override
+    public boolean acceptsDocsOutOfOrder() {
+      return true;
+    }
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestOptimizeForever.java b/lucene/backwards/src/test/org/apache/lucene/index/TestOptimizeForever.java
new file mode 100644
index 0000000..90c515e
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestOptimizeForever.java
@@ -0,0 +1,106 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LineFileDocs;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util._TestUtil;
+
+
+public class TestOptimizeForever extends LuceneTestCase {
+
+  // Just counts how many merges are done for optimize
+  private static class MyIndexWriter extends IndexWriter {
+
+    AtomicInteger optimizeMergeCount = new AtomicInteger();
+    private boolean first;
+
+    public MyIndexWriter(Directory dir, IndexWriterConfig conf) throws Exception {
+      super(dir, conf);
+    }
+
+    @Override
+    public void merge(MergePolicy.OneMerge merge) throws CorruptIndexException, IOException {
+      if (merge.optimize && (first || merge.segments.size() == 1)) {
+        first = false;
+        if (VERBOSE) {
+          System.out.println("TEST: optimized merge");
+        }
+        optimizeMergeCount.incrementAndGet();
+      }
+      super.merge(merge);
+    }
+  }
+
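+  // Make sure optimize() terminates even while another thread keeps updating
+  // documents concurrently; it should not chase newly flushed segments forever.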
+  public void test() throws Exception {
+    final Directory d = newDirectory();
+    final MyIndexWriter w = new MyIndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    w.setInfoStream(VERBOSE ? System.out : null);
+
+    // Try to make an index that requires optimizing:
+    w.getConfig().setMaxBufferedDocs(_TestUtil.nextInt(random, 2, 11));
+    final int numStartDocs = atLeast(20);
+    final LineFileDocs docs = new LineFileDocs(random);
+    for(int docIDX=0;docIDX<numStartDocs;docIDX++) {
+      w.addDocument(docs.nextDoc());
+    }
+    MergePolicy mp = w.getConfig().getMergePolicy();
+    final int mergeAtOnce = 1+w.segmentInfos.size();
+    if (mp instanceof TieredMergePolicy) {
+      ((TieredMergePolicy) mp).setMaxMergeAtOnce(mergeAtOnce);
+    } else if (mp instanceof LogMergePolicy) {
+      ((LogMergePolicy) mp).setMergeFactor(mergeAtOnce);
+    } else {
+      // skip test
+      w.close();
+      d.close();
+      return;
+    }
+
+    final AtomicBoolean doStop = new AtomicBoolean();
+    w.getConfig().setMaxBufferedDocs(2);
+    Thread t = new Thread() {
+      @Override
+      public void run() {
+        try {
+          while (!doStop.get()) {
+            w.updateDocument(new Term("docid", "" + random.nextInt(numStartDocs)),
+                             docs.nextDoc());
+            // Force deletes to apply
+            w.getReader().close();
+          }
+        } catch (Throwable t) {
+          throw new RuntimeException(t);
+        }
+      }
+      };
+    t.start();
+    w.optimize();
+    doStop.set(true);
+    t.join();
+    assertTrue("optimize count is " + w.optimizeMergeCount.get(), w.optimizeMergeCount.get() <= 1);
+    w.close();
+    d.close();
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestParallelReader.java b/lucene/backwards/src/test/org/apache/lucene/index/TestParallelReader.java
new file mode 100644
index 0000000..c7bcefe
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestParallelReader.java
@@ -0,0 +1,329 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Random;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.MapFieldSelector;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.BooleanClause.Occur;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+
+public class TestParallelReader extends LuceneTestCase {
+
+  private IndexSearcher parallel;
+  private IndexSearcher single;
+  private Directory dir, dir1, dir2;
+  
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    single = single(random);
+    parallel = parallel(random);
+  }
+  
+  @Override
+  public void tearDown() throws Exception {
+    single.getIndexReader().close();
+    single.close();
+    parallel.getIndexReader().close();
+    parallel.close();
+    dir.close();
+    dir1.close();
+    dir2.close();
+    super.tearDown();
+  }
+
+  public void testQueries() throws Exception {
+    queryTest(new TermQuery(new Term("f1", "v1")));
+    queryTest(new TermQuery(new Term("f1", "v2")));
+    queryTest(new TermQuery(new Term("f2", "v1")));
+    queryTest(new TermQuery(new Term("f2", "v2")));
+    queryTest(new TermQuery(new Term("f3", "v1")));
+    queryTest(new TermQuery(new Term("f3", "v2")));
+    queryTest(new TermQuery(new Term("f4", "v1")));
+    queryTest(new TermQuery(new Term("f4", "v2")));
+
+    BooleanQuery bq1 = new BooleanQuery();
+    bq1.add(new TermQuery(new Term("f1", "v1")), Occur.MUST);
+    bq1.add(new TermQuery(new Term("f4", "v1")), Occur.MUST);
+    queryTest(bq1);
+  }
+
+  public void testFieldNames() throws Exception {
+    Directory dir1 = getDir1(random);
+    Directory dir2 = getDir2(random);
+    ParallelReader pr = new ParallelReader();
+    pr.add(IndexReader.open(dir1, false));
+    pr.add(IndexReader.open(dir2, false));
+    Collection<String> fieldNames = pr.getFieldNames(IndexReader.FieldOption.ALL);
+    assertEquals(4, fieldNames.size());
+    assertTrue(fieldNames.contains("f1"));
+    assertTrue(fieldNames.contains("f2"));
+    assertTrue(fieldNames.contains("f3"));
+    assertTrue(fieldNames.contains("f4"));
+    pr.close();
+    dir1.close();
+    dir2.close();
+  }
+  
+  public void testDocument() throws IOException {
+    Directory dir1 = getDir1(random);
+    Directory dir2 = getDir2(random);
+    ParallelReader pr = new ParallelReader();
+    pr.add(IndexReader.open(dir1, false));
+    pr.add(IndexReader.open(dir2, false));
+
+    Document doc11 = pr.document(0, new MapFieldSelector(new String[] {"f1"}));
+    Document doc24 = pr.document(1, new MapFieldSelector(Arrays.asList(new String[] {"f4"})));
+    Document doc223 = pr.document(1, new MapFieldSelector(new String[] {"f2", "f3"}));
+    
+    assertEquals(1, doc11.getFields().size());
+    assertEquals(1, doc24.getFields().size());
+    assertEquals(2, doc223.getFields().size());
+    
+    assertEquals("v1", doc11.get("f1"));
+    assertEquals("v2", doc24.get("f4"));
+    assertEquals("v2", doc223.get("f2"));
+    assertEquals("v2", doc223.get("f3"));
+    pr.close();
+    dir1.close();
+    dir2.close();
+  }
+  
+  public void testIncompatibleIndexes() throws IOException {
+    // two documents:
+    Directory dir1 = getDir1(random);
+
+    // one document only:
+    Directory dir2 = newDirectory();
+    IndexWriter w2 = new IndexWriter(dir2, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    Document d3 = new Document();
+    d3.add(newField("f3", "v1", Field.Store.YES, Field.Index.ANALYZED));
+    w2.addDocument(d3);
+    w2.close();
+    
+    ParallelReader pr = new ParallelReader();
+    pr.add(IndexReader.open(dir1, false));
+    IndexReader ir = IndexReader.open(dir2, false);
+    try {
+      pr.add(ir);
+      fail("didn't get exptected exception: indexes don't have same number of documents");
+    } catch (IllegalArgumentException e) {
+      // expected exception
+    }
+    pr.close();
+    ir.close();
+    dir1.close();
+    dir2.close();
+  }
+  
+  public void testIsCurrent() throws IOException {
+    Directory dir1 = getDir1(random);
+    Directory dir2 = getDir2(random);
+    ParallelReader pr = new ParallelReader();
+    pr.add(IndexReader.open(dir1, false));
+    pr.add(IndexReader.open(dir2, false));
+    
+    assertTrue(pr.isCurrent());
+    IndexReader modifier = IndexReader.open(dir1, false);
+    modifier.setNorm(0, "f1", 100);
+    modifier.close();
+    
+    // one of the two IndexReaders which ParallelReader is using
+    // is not current anymore
+    assertFalse(pr.isCurrent());
+    
+    modifier = IndexReader.open(dir2, false);
+    modifier.setNorm(0, "f3", 100);
+    modifier.close();
+    
+    // now both are not current anymore
+    assertFalse(pr.isCurrent());
+    pr.close();
+    dir1.close();
+    dir2.close();
+  }
+
+  public void testIsOptimized() throws IOException {
+    Directory dir1 = getDir1(random);
+    Directory dir2 = getDir2(random);
+    
+    // add another document to ensure that the indexes are not optimized
+    IndexWriter modifier = new IndexWriter(
+        dir1,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+            setMergePolicy(newLogMergePolicy(10))
+    );
+    Document d = new Document();
+    d.add(newField("f1", "v1", Field.Store.YES, Field.Index.ANALYZED));
+    modifier.addDocument(d);
+    modifier.close();
+
+    modifier = new IndexWriter(
+        dir2,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+            setMergePolicy(newLogMergePolicy(10))
+    );
+    d = new Document();
+    d.add(newField("f2", "v2", Field.Store.YES, Field.Index.ANALYZED));
+    modifier.addDocument(d);
+    modifier.close();
+
+    
+    ParallelReader pr = new ParallelReader();
+    pr.add(IndexReader.open(dir1, false));
+    pr.add(IndexReader.open(dir2, false));
+    assertFalse(pr.isOptimized());
+    pr.close();
+    
+    modifier = new IndexWriter(dir1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    modifier.optimize();
+    modifier.close();
+    
+    pr = new ParallelReader();
+    pr.add(IndexReader.open(dir1, false));
+    pr.add(IndexReader.open(dir2, false));
+    // only one of the two indexes is optimized
+    assertFalse(pr.isOptimized());
+    pr.close();
+
+    
+    modifier = new IndexWriter(dir2, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    modifier.optimize();
+    modifier.close();
+    
+    pr = new ParallelReader();
+    pr.add(IndexReader.open(dir1, false));
+    pr.add(IndexReader.open(dir2, false));
+    // now both indexes are optimized
+    assertTrue(pr.isOptimized());
+    pr.close();
+    dir1.close();
+    dir2.close();
+  }
+
+  public void testAllTermDocs() throws IOException {
+    Directory dir1 = getDir1(random);
+    Directory dir2 = getDir2(random);
+    ParallelReader pr = new ParallelReader();
+    pr.add(IndexReader.open(dir1, false));
+    pr.add(IndexReader.open(dir2, false));
+    int NUM_DOCS = 2;
+    TermDocs td = pr.termDocs(null);
+    for(int i=0;i<NUM_DOCS;i++) {
+      assertTrue(td.next());
+      assertEquals(i, td.doc());
+      assertEquals(1, td.freq());
+    }
+    td.close();
+    pr.close();
+    dir1.close();
+    dir2.close();
+  }
+    
+  
+  private void queryTest(Query query) throws IOException {
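+    // the ParallelReader over the two half-indexes must return the same hits,
+    // scores and stored fields as the single combined index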
+    ScoreDoc[] parallelHits = parallel.search(query, null, 1000).scoreDocs;
+    ScoreDoc[] singleHits = single.search(query, null, 1000).scoreDocs;
+    assertEquals(parallelHits.length, singleHits.length);
+    for(int i = 0; i < parallelHits.length; i++) {
+      assertEquals(parallelHits[i].score, singleHits[i].score, 0.001f);
+      Document docParallel = parallel.doc(parallelHits[i].doc);
+      Document docSingle = single.doc(singleHits[i].doc);
+      assertEquals(docParallel.get("f1"), docSingle.get("f1"));
+      assertEquals(docParallel.get("f2"), docSingle.get("f2"));
+      assertEquals(docParallel.get("f3"), docSingle.get("f3"));
+      assertEquals(docParallel.get("f4"), docSingle.get("f4"));
+    }
+  }
+
+  // Fields 1-4 indexed together:
+  private IndexSearcher single(Random random) throws IOException {
+    dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    Document d1 = new Document();
+    d1.add(newField("f1", "v1", Field.Store.YES, Field.Index.ANALYZED));
+    d1.add(newField("f2", "v1", Field.Store.YES, Field.Index.ANALYZED));
+    d1.add(newField("f3", "v1", Field.Store.YES, Field.Index.ANALYZED));
+    d1.add(newField("f4", "v1", Field.Store.YES, Field.Index.ANALYZED));
+    w.addDocument(d1);
+    Document d2 = new Document();
+    d2.add(newField("f1", "v2", Field.Store.YES, Field.Index.ANALYZED));
+    d2.add(newField("f2", "v2", Field.Store.YES, Field.Index.ANALYZED));
+    d2.add(newField("f3", "v2", Field.Store.YES, Field.Index.ANALYZED));
+    d2.add(newField("f4", "v2", Field.Store.YES, Field.Index.ANALYZED));
+    w.addDocument(d2);
+    w.close();
+
+    return new IndexSearcher(dir, false);
+  }
+
+  // Fields 1 & 2 in one index, 3 & 4 in other, with ParallelReader:
+  private IndexSearcher parallel(Random random) throws IOException {
+    dir1 = getDir1(random);
+    dir2 = getDir2(random);
+    ParallelReader pr = new ParallelReader();
+    pr.add(IndexReader.open(dir1, false));
+    pr.add(IndexReader.open(dir2, false));
+    return newSearcher(pr);
+  }
+
+  private Directory getDir1(Random random) throws IOException {
+    Directory dir1 = newDirectory();
+    IndexWriter w1 = new IndexWriter(dir1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    Document d1 = new Document();
+    d1.add(newField("f1", "v1", Field.Store.YES, Field.Index.ANALYZED));
+    d1.add(newField("f2", "v1", Field.Store.YES, Field.Index.ANALYZED));
+    w1.addDocument(d1);
+    Document d2 = new Document();
+    d2.add(newField("f1", "v2", Field.Store.YES, Field.Index.ANALYZED));
+    d2.add(newField("f2", "v2", Field.Store.YES, Field.Index.ANALYZED));
+    w1.addDocument(d2);
+    w1.close();
+    return dir1;
+  }
+
+  private Directory getDir2(Random random) throws IOException {
+    Directory dir2 = newDirectory();
+    IndexWriter w2 = new IndexWriter(dir2, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    Document d3 = new Document();
+    d3.add(newField("f3", "v1", Field.Store.YES, Field.Index.ANALYZED));
+    d3.add(newField("f4", "v1", Field.Store.YES, Field.Index.ANALYZED));
+    w2.addDocument(d3);
+    Document d4 = new Document();
+    d4.add(newField("f3", "v2", Field.Store.YES, Field.Index.ANALYZED));
+    d4.add(newField("f4", "v2", Field.Store.YES, Field.Index.ANALYZED));
+    w2.addDocument(d4);
+    w2.close();
+    return dir2;
+  }
+
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestParallelReaderEmptyIndex.java b/lucene/backwards/src/test/org/apache/lucene/index/TestParallelReaderEmptyIndex.java
new file mode 100644
index 0000000..6000706
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestParallelReaderEmptyIndex.java
@@ -0,0 +1,126 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util._TestUtil;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field.Index;
+import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.document.Field.TermVector;
+import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+
+/**
+ * Some tests for {@link ParallelReader}s with empty indexes
+ * 
+ * @author Christian Kohlschuetter
+ */
+public class TestParallelReaderEmptyIndex extends LuceneTestCase {
+
+  /**
+   * Creates two empty indexes and wraps a ParallelReader around. Adding this
+   * reader to a new index should not throw any exception.
+   * 
+   * @throws IOException
+   */
+  public void testEmptyIndex() throws IOException {
+    Directory rd1 = newDirectory();
+    IndexWriter iw = new IndexWriter(rd1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    iw.close();
+
+    Directory rd2 = newDirectory(rd1);
+
+    Directory rdOut = newDirectory();
+
+    IndexWriter iwOut = new IndexWriter(rdOut, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    ParallelReader pr = new ParallelReader();
+    pr.add(IndexReader.open(rd1,true));
+    pr.add(IndexReader.open(rd2,true));
+		
+    // When unpatched, Lucene crashes here with a NoSuchElementException (caused by ParallelTermEnum)
+    iwOut.addIndexes(new IndexReader[] { pr });
+		
+    iwOut.optimize();
+    iwOut.close();
+    rdOut.close();
+    rd1.close();
+    rd2.close();
+  }
+
+  /**
+   * This method creates an empty index (numFields=0, numDocs=0) that is marked
+   * as having TermVectors. Adding this index to another index should not throw
+   * any exception.
+   */
+  public void testEmptyIndexWithVectors() throws IOException {
+    Directory rd1 = newDirectory();
+    {
+      IndexWriter iw = new IndexWriter(rd1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+      Document doc = new Document();
+      doc.add(newField("test", "", Store.NO, Index.ANALYZED,
+                        TermVector.YES));
+      iw.addDocument(doc);
+      doc.add(newField("test", "", Store.NO, Index.ANALYZED,
+                        TermVector.NO));
+      iw.addDocument(doc);
+      iw.close();
+
+      IndexReader ir = IndexReader.open(rd1,false);
+      ir.deleteDocument(0);
+      ir.close();
+
+      iw = new IndexWriter(rd1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
+      iw.optimize();
+      iw.close();
+    }
+
+    Directory rd2 = newDirectory();
+    {
+      IndexWriter iw = new IndexWriter(rd2, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+      Document doc = new Document();
+      iw.addDocument(doc);
+      iw.close();
+    }
+
+    Directory rdOut = newDirectory();
+
+    IndexWriter iwOut = new IndexWriter(rdOut, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    ParallelReader pr = new ParallelReader();
+    pr.add(IndexReader.open(rd1,true));
+    pr.add(IndexReader.open(rd2,true));
+
+    // When unpatched, Lucene crashes here with an ArrayIndexOutOfBoundsException (caused by TermVectorsWriter)
+    iwOut.addIndexes(new IndexReader[] { pr });
+
+    // ParallelReader closes any IndexReader you added to it:
+    pr.close();
+
+    rd1.close();
+    rd2.close();
+		
+    iwOut.optimize();
+    iwOut.close();
+    
+    rdOut.close();
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestParallelTermEnum.java b/lucene/backwards/src/test/org/apache/lucene/index/TestParallelTermEnum.java
new file mode 100755
index 0000000..eb00013
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestParallelTermEnum.java
@@ -0,0 +1,184 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field.Index;
+import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.store.Directory;
+
+public class TestParallelTermEnum extends LuceneTestCase {
+    private IndexReader ir1;
+    private IndexReader ir2;
+    private Directory rd1;
+    private Directory rd2;
+    
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+        Document doc;
+        rd1 = newDirectory();
+        IndexWriter iw1 = new IndexWriter(rd1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+
+        doc = new Document();
+        doc.add(newField("field1", "the quick brown fox jumps", Store.YES,
+            Index.ANALYZED));
+        doc.add(newField("field2", "the quick brown fox jumps", Store.YES,
+            Index.ANALYZED));
+        doc.add(newField("field4", "", Store.NO, Index.ANALYZED));
+        iw1.addDocument(doc);
+
+        iw1.close();
+
+        rd2 = newDirectory();
+        IndexWriter iw2 = new IndexWriter(rd2, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+
+        doc = new Document();
+        doc.add(newField("field0", "", Store.NO, Index.ANALYZED));
+        doc.add(newField("field1", "the fox jumps over the lazy dog",
+            Store.YES, Index.ANALYZED));
+        doc.add(newField("field3", "the fox jumps over the lazy dog",
+            Store.YES, Index.ANALYZED));
+        iw2.addDocument(doc);
+
+        iw2.close();
+
+        this.ir1 = IndexReader.open(rd1, true);
+        this.ir2 = IndexReader.open(rd2, true);
+    }
+
+    @Override
+    public void tearDown() throws Exception {
+        ir1.close();
+        ir2.close();
+        rd1.close();
+        rd2.close();
+        super.tearDown();
+    }
+
+    public void test1() throws IOException {
+        ParallelReader pr = new ParallelReader();
+        pr.add(ir1);
+        pr.add(ir2);
+
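+        // the parallel TermEnum should walk terms field by field: field1 and field2
+        // come from ir1, field3 from ir2 (field0 and field4 are empty and contribute
+        // no terms), and each term occurs exactly once, in doc 0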
+        TermDocs td = pr.termDocs();
+
+        TermEnum te = pr.terms();
+        assertTrue(te.next());
+        assertEquals("field1:brown", te.term().toString());
+        td.seek(te.term());
+        assertTrue(td.next());
+        assertEquals(0, td.doc());
+        assertFalse(td.next());
+        assertTrue(te.next());
+        assertEquals("field1:fox", te.term().toString());
+        td.seek(te.term());
+        assertTrue(td.next());
+        assertEquals(0, td.doc());
+        assertFalse(td.next());
+        assertTrue(te.next());
+        assertEquals("field1:jumps", te.term().toString());
+        td.seek(te.term());
+        assertTrue(td.next());
+        assertEquals(0, td.doc());
+        assertFalse(td.next());
+        assertTrue(te.next());
+        assertEquals("field1:quick", te.term().toString());
+        td.seek(te.term());
+        assertTrue(td.next());
+        assertEquals(0, td.doc());
+        assertFalse(td.next());
+        assertTrue(te.next());
+        assertEquals("field1:the", te.term().toString());
+        td.seek(te.term());
+        assertTrue(td.next());
+        assertEquals(0, td.doc());
+        assertFalse(td.next());
+        assertTrue(te.next());
+        assertEquals("field2:brown", te.term().toString());
+        td.seek(te.term());
+        assertTrue(td.next());
+        assertEquals(0, td.doc());
+        assertFalse(td.next());
+        assertTrue(te.next());
+        assertEquals("field2:fox", te.term().toString());
+        td.seek(te.term());
+        assertTrue(td.next());
+        assertEquals(0, td.doc());
+        assertFalse(td.next());
+        assertTrue(te.next());
+        assertEquals("field2:jumps", te.term().toString());
+        td.seek(te.term());
+        assertTrue(td.next());
+        assertEquals(0, td.doc());
+        assertFalse(td.next());
+        assertTrue(te.next());
+        assertEquals("field2:quick", te.term().toString());
+        td.seek(te.term());
+        assertTrue(td.next());
+        assertEquals(0, td.doc());
+        assertFalse(td.next());
+        assertTrue(te.next());
+        assertEquals("field2:the", te.term().toString());
+        td.seek(te.term());
+        assertTrue(td.next());
+        assertEquals(0, td.doc());
+        assertFalse(td.next());
+        assertTrue(te.next());
+        assertEquals("field3:dog", te.term().toString());
+        td.seek(te.term());
+        assertTrue(td.next());
+        assertEquals(0, td.doc());
+        assertFalse(td.next());
+        assertTrue(te.next());
+        assertEquals("field3:fox", te.term().toString());
+        td.seek(te.term());
+        assertTrue(td.next());
+        assertEquals(0, td.doc());
+        assertFalse(td.next());
+        assertTrue(te.next());
+        assertEquals("field3:jumps", te.term().toString());
+        td.seek(te.term());
+        assertTrue(td.next());
+        assertEquals(0, td.doc());
+        assertFalse(td.next());
+        assertTrue(te.next());
+        assertEquals("field3:lazy", te.term().toString());
+        td.seek(te.term());
+        assertTrue(td.next());
+        assertEquals(0, td.doc());
+        assertFalse(td.next());
+        assertTrue(te.next());
+        assertEquals("field3:over", te.term().toString());
+        td.seek(te.term());
+        assertTrue(td.next());
+        assertEquals(0, td.doc());
+        assertFalse(td.next());
+        assertTrue(te.next());
+        assertEquals("field3:the", te.term().toString());
+        td.seek(te.term());
+        assertTrue(td.next());
+        assertEquals(0, td.doc());
+        assertFalse(td.next());
+        assertFalse(te.next());
+    }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestPayloadProcessorProvider.java b/lucene/backwards/src/test/org/apache/lucene/index/TestPayloadProcessorProvider.java
new file mode 100644
index 0000000..7afe032
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestPayloadProcessorProvider.java
@@ -0,0 +1,265 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Random;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.MockTokenizer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.WhitespaceAnalyzer;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.Field.Index;
+import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.index.PayloadProcessorProvider.DirPayloadProcessor;
+import org.apache.lucene.index.PayloadProcessorProvider.PayloadProcessor;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+import org.junit.Test;
+
+public class TestPayloadProcessorProvider extends LuceneTestCase {
+
+  private static final class PerDirPayloadProcessor extends PayloadProcessorProvider {
+
+    private Map<Directory, DirPayloadProcessor> processors;
+
+    public PerDirPayloadProcessor(Map<Directory, DirPayloadProcessor> processors) {
+      this.processors = processors;
+    }
+
+    @Override
+    public DirPayloadProcessor getDirProcessor(Directory dir) throws IOException {
+      return processors.get(dir);
+    }
+
+  }
+
+  private static final class PerTermPayloadProcessor extends DirPayloadProcessor {
+
+    @Override
+    public PayloadProcessor getProcessor(Term term) throws IOException {
+      // don't process payloads of terms other than "p:p1"
+      if (!term.field().equals("p") || !term.text().equals("p1")) {
+        return null;
+      }
+      
+      // payloads of the matching term "p:p1" get deleted
+      return new DeletePayloadProcessor();
+    }
+    
+  }
+  
+  /** deletes the incoming payload */
+  private static final class DeletePayloadProcessor extends PayloadProcessor {
+
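+    // reporting a new length of 0 means no payload bytes are written for the
+    // processed positions, which effectively removes the payload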
+    @Override
+    public int payloadLength() throws IOException {
+      return 0;
+    }
+
+    @Override
+    public byte[] processPayload(byte[] payload, int start, int length) throws IOException {
+      return payload;
+    }
+
+  }
+
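+  /** Produces a single token carrying a one-byte payload (value 1). */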
+  private static final class PayloadTokenStream extends TokenStream {
+
+    private final PayloadAttribute payload = addAttribute(PayloadAttribute.class);
+    private final CharTermAttribute term = addAttribute(CharTermAttribute.class);
+
+    private boolean called = false;
+    private String t;
+
+    public PayloadTokenStream(String t) {
+      this.t = t;
+    }
+
+    @Override
+    public boolean incrementToken() throws IOException {
+      if (called) {
+        return false;
+      }
+
+      called = true;
+      byte[] p = new byte[] { 1 };
+      payload.setPayload(new Payload(p));
+      term.append(t);
+      return true;
+    }
+
+    @Override
+    public void reset() throws IOException {
+      super.reset();
+      called = false;
+      term.setEmpty();
+    }
+  }
+
+  private static final int NUM_DOCS = 10;
+
+  private IndexWriterConfig getConfig(Random random) {
+    return newIndexWriterConfig(random, TEST_VERSION_CURRENT, new WhitespaceAnalyzer(
+        TEST_VERSION_CURRENT));
+  }
+
+  private void populateDirs(Random random, Directory[] dirs, boolean multipleCommits)
+      throws IOException {
+    for (int i = 0; i < dirs.length; i++) {
+      dirs[i] = newDirectory();
+      populateDocs(random, dirs[i], multipleCommits);
+      verifyPayloadExists(dirs[i], new Term("p", "p1"), NUM_DOCS);
+      verifyPayloadExists(dirs[i], new Term("p", "p2"), NUM_DOCS);
+    }
+  }
+
+  private void populateDocs(Random random, Directory dir, boolean multipleCommits)
+      throws IOException {
+    IndexWriter writer = new IndexWriter(
+        dir,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).
+            setMergePolicy(newLogMergePolicy(10))
+    );
+    TokenStream payloadTS1 = new PayloadTokenStream("p1");
+    TokenStream payloadTS2 = new PayloadTokenStream("p2");
+    for (int i = 0; i < NUM_DOCS; i++) {
+      Document doc = new Document();
+      doc.add(newField("id", "doc" + i, Store.NO, Index.NOT_ANALYZED_NO_NORMS));
+      doc.add(newField("content", "doc content " + i, Store.NO, Index.ANALYZED));
+      doc.add(new Field("p", payloadTS1));
+      doc.add(new Field("p", payloadTS2));
+      writer.addDocument(doc);
+      if (multipleCommits && (i % 4 == 0)) {
+        writer.commit();
+      }
+    }
+    writer.close();
+  }
+
+  private void verifyPayloadExists(Directory dir, Term term, int numExpected)
+      throws IOException {
+    IndexReader reader = IndexReader.open(dir);
+    try {
+      int numPayloads = 0;
+      TermPositions tp = reader.termPositions(term);
+      while (tp.next()) {
+        tp.nextPosition();
+        if (tp.isPayloadAvailable()) {
+          assertEquals(1, tp.getPayloadLength());
+          byte[] p = new byte[tp.getPayloadLength()];
+          tp.getPayload(p, 0);
+          assertEquals(1, p[0]);
+          ++numPayloads;
+        }
+      }
+      assertEquals(numExpected, numPayloads);
+    } finally {
+      reader.close();
+    }
+  }
+
+  private void doTest(Random random, boolean addToEmptyIndex,
+      int numExpectedPayloads, boolean multipleCommits) throws IOException {
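+    // build two source indexes whose "p:p1" payloads should be stripped while
+    // they are added (via addIndexes) into the destination index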
+    Directory[] dirs = new Directory[2];
+    populateDirs(random, dirs, multipleCommits);
+
+    Directory dir = newDirectory();
+    if (!addToEmptyIndex) {
+      populateDocs(random, dir, multipleCommits);
+      verifyPayloadExists(dir, new Term("p", "p1"), NUM_DOCS);
+      verifyPayloadExists(dir, new Term("p", "p2"), NUM_DOCS);
+    }
+
+    // Add two source dirs. By not adding the dest dir, we ensure its payloads
+    // won't get processed.
+    Map<Directory, DirPayloadProcessor> processors = new HashMap<Directory, DirPayloadProcessor>();
+    for (Directory d : dirs) {
+      processors.put(d, new PerTermPayloadProcessor());
+    }
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)));
+    writer.setPayloadProcessorProvider(new PerDirPayloadProcessor(processors));
+
+    IndexReader[] readers = new IndexReader[dirs.length];
+    for (int i = 0; i < readers.length; i++) {
+      readers[i] = IndexReader.open(dirs[i]);
+    }
+    try {
+      writer.addIndexes(readers);
+    } finally {
+      for (IndexReader r : readers) {
+        r.close();
+      }
+    }
+    writer.close();
+    verifyPayloadExists(dir, new Term("p", "p1"), numExpectedPayloads);
+    // the second term should always have all payloads
+    numExpectedPayloads = NUM_DOCS * dirs.length
+        + (addToEmptyIndex ? 0 : NUM_DOCS);
+    verifyPayloadExists(dir, new Term("p", "p2"), numExpectedPayloads);
+    for (Directory d : dirs)
+      d.close();
+    dir.close();
+  }
+
+  @Test
+  public void testAddIndexes() throws Exception {
+    // addIndexes - single commit in each
+    doTest(random, true, 0, false);
+
+    // addIndexes - multiple commits in each
+    doTest(random, true, 0, true);
+  }
+
+  @Test
+  public void testAddIndexesIntoExisting() throws Exception {
+    // addIndexes - single commit in each
+    doTest(random, false, NUM_DOCS, false);
+
+    // addIndexes - multiple commits in each
+    doTest(random, false, NUM_DOCS, true);
+  }
+
+  @Test
+  public void testRegularMerges() throws Exception {
+    Directory dir = newDirectory();
+    populateDocs(random, dir, true);
+    verifyPayloadExists(dir, new Term("p", "p1"), NUM_DOCS);
+    verifyPayloadExists(dir, new Term("p", "p2"), NUM_DOCS);
+
+    // Register a processor for the index's own directory, so its payloads get
+    // processed during the forced merge (optimize).
+    Map<Directory, DirPayloadProcessor> processors = new HashMap<Directory, DirPayloadProcessor>();
+    processors.put(dir, new PerTermPayloadProcessor());
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)));
+    writer.setPayloadProcessorProvider(new PerDirPayloadProcessor(processors));
+    writer.optimize();
+    writer.close();
+
+    verifyPayloadExists(dir, new Term("p", "p1"), 0);
+    verifyPayloadExists(dir, new Term("p", "p2"), NUM_DOCS);
+    dir.close();
+  }
+
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestPayloads.java b/lucene/backwards/src/test/org/apache/lucene/index/TestPayloads.java
new file mode 100644
index 0000000..f1dc05b
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestPayloads.java
@@ -0,0 +1,617 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.File;
+import java.io.IOException;
+import java.io.Reader;
+import java.io.UnsupportedEncodingException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.MockTokenizer;
+import org.apache.lucene.analysis.TokenFilter;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.WhitespaceTokenizer;
+import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util._TestUtil;
+
+
+public class TestPayloads extends LuceneTestCase {
+    
+    // Simple tests for the Payload class
+    public void testPayload() throws Exception {
+        byte[] testData = "This is a test!".getBytes();
+        Payload payload = new Payload(testData);
+        assertEquals("Wrong payload length.", testData.length, payload.length());
+        
+        // test copyTo()
+        byte[] target = new byte[testData.length - 1];
+        try {
+            payload.copyTo(target, 0);
+            fail("Expected exception not thrown");
+        } catch (Exception expected) {
+            // expected exception
+        }
+        
+        target = new byte[testData.length + 3];
+        payload.copyTo(target, 3);
+        
+        for (int i = 0; i < testData.length; i++) {
+            assertEquals(testData[i], target[i + 3]);
+        }
+        
+
+        // test toByteArray()
+        target = payload.toByteArray();
+        assertByteArrayEquals(testData, target);
+
+        // test byteAt()
+        for (int i = 0; i < testData.length; i++) {
+            assertEquals(payload.byteAt(i), testData[i]);
+        }
+        
+        try {
+            payload.byteAt(testData.length + 1);
+            fail("Expected exception not thrown");
+        } catch (Exception expected) {
+            // expected exception
+        }
+        
+        Payload clone = (Payload) payload.clone();
+        assertEquals(payload.length(), clone.length());
+        for (int i = 0; i < payload.length(); i++) {
+          assertEquals(payload.byteAt(i), clone.byteAt(i));
+        }
+        
+    }
+
+    // Tests whether the DocumentWriter and SegmentMerger correctly enable the
+    // payload bit in the FieldInfo
+    public void testPayloadFieldBit() throws Exception {
+        Directory ram = newDirectory();
+        PayloadAnalyzer analyzer = new PayloadAnalyzer();
+        IndexWriter writer = new IndexWriter(ram, newIndexWriterConfig( TEST_VERSION_CURRENT, analyzer));
+        Document d = new Document();
+        // this field won't have any payloads
+        d.add(newField("f1", "This field has no payloads", Field.Store.NO, Field.Index.ANALYZED));
+        // this field will have payloads in all docs, though not at every term position,
+        // so it is used to check that the DocumentWriter correctly enables the payloads bit
+        // even if only some term positions have payloads
+        d.add(newField("f2", "This field has payloads in all docs", Field.Store.NO, Field.Index.ANALYZED));
+        d.add(newField("f2", "This field has payloads in all docs", Field.Store.NO, Field.Index.ANALYZED));
+        // this field is used to verify that the SegmentMerger enables payloads for a field
+        // that has payloads enabled in only some documents
+        d.add(newField("f3", "This field has payloads in some docs", Field.Store.NO, Field.Index.ANALYZED));
+        // only add payload data for field f2
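+        // (the leading 1 skips the first f2 field instance, so only the second f2 value carries payloads)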
+        analyzer.setPayloadData("f2", 1, "somedata".getBytes(), 0, 1);
+        writer.addDocument(d);
+        // flush
+        writer.close();        
+        
+        SegmentReader reader = SegmentReader.getOnlySegmentReader(ram);
+        FieldInfos fi = reader.fieldInfos();
+        assertFalse("Payload field bit should not be set.", fi.fieldInfo("f1").storePayloads);
+        assertTrue("Payload field bit should be set.", fi.fieldInfo("f2").storePayloads);
+        assertFalse("Payload field bit should not be set.", fi.fieldInfo("f3").storePayloads);
+        reader.close();
+        
+        // now we add another document which has payloads for field f3 and verify that the SegmentMerger
+        // enables payloads for that field
+        writer = new IndexWriter(ram, newIndexWriterConfig( TEST_VERSION_CURRENT,
+            analyzer).setOpenMode(OpenMode.CREATE));
+        d = new Document();
+        d.add(newField("f1", "This field has no payloads", Field.Store.NO, Field.Index.ANALYZED));
+        d.add(newField("f2", "This field has payloads in all docs", Field.Store.NO, Field.Index.ANALYZED));
+        d.add(newField("f2", "This field has payloads in all docs", Field.Store.NO, Field.Index.ANALYZED));
+        d.add(newField("f3", "This field has payloads in some docs", Field.Store.NO, Field.Index.ANALYZED));
+        // add payload data for field f2 and f3
+        analyzer.setPayloadData("f2", "somedata".getBytes(), 0, 1);
+        analyzer.setPayloadData("f3", "somedata".getBytes(), 0, 3);
+        writer.addDocument(d);
+        // force merge
+        writer.optimize();
+        // flush
+        writer.close();
+
+        reader = SegmentReader.getOnlySegmentReader(ram);
+        fi = reader.fieldInfos();
+        assertFalse("Payload field bit should not be set.", fi.fieldInfo("f1").storePayloads);
+        assertTrue("Payload field bit should be set.", fi.fieldInfo("f2").storePayloads);
+        assertTrue("Payload field bit should be set.", fi.fieldInfo("f3").storePayloads);
+        reader.close();        
+        ram.close();
+    }
+
+    // Tests whether payloads are correctly stored and loaded using both RAMDirectory and FSDirectory
+    public void testPayloadsEncoding() throws Exception {
+        // first perform the test using a RAMDirectory
+        Directory dir = newDirectory();
+        performTest(dir);
+        dir.close();
+        // now use an FSDirectory and repeat the same test
+        File dirName = _TestUtil.getTempDir("test_payloads");
+        dir = newFSDirectory(dirName);
+        performTest(dir);
+        _TestUtil.rmDir(dirName);
+        dir.close();
+    }
+    
+    // builds an index with payloads in the given Directory and performs
+    // different tests to verify the payload encoding
+    private void performTest(Directory dir) throws Exception {
+        PayloadAnalyzer analyzer = new PayloadAnalyzer();
+        IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
+            TEST_VERSION_CURRENT, analyzer)
+            .setOpenMode(OpenMode.CREATE)
+            .setMergePolicy(newLogMergePolicy()));
+        
+        // should be in sync with value in TermInfosWriter
+        final int skipInterval = 16;
+        
+        final int numTerms = 5;
+        final String fieldName = "f1";
+        
+        int numDocs = skipInterval + 1; 
+        // create content for the test documents with just a few terms
+        Term[] terms = generateTerms(fieldName, numTerms);
+        StringBuilder sb = new StringBuilder();
+        for (int i = 0; i < terms.length; i++) {
+            sb.append(terms[i].text);
+            sb.append(" ");
+        }
+        String content = sb.toString();
+        
+        
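+        // total payload bytes: the first 2*numDocs docs carry 1-byte payloads per term
+        // (numTerms * numDocs * 2), and the following numDocs docs carry i-byte payloads
+        // per term for doc i (numTerms * (0 + 1 + ... + (numDocs-1)))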
+        int payloadDataLength = numTerms * numDocs * 2 + numTerms * numDocs * (numDocs - 1) / 2;
+        byte[] payloadData = generateRandomData(payloadDataLength);
+        
+        Document d = new Document();
+        d.add(newField(fieldName, content, Field.Store.NO, Field.Index.ANALYZED));
+        // add the same document multiple times to have the same payload lengths for all
+        // occurrences within two consecutive skip intervals
+        int offset = 0;
+        for (int i = 0; i < 2 * numDocs; i++) {
+            analyzer.setPayloadData(fieldName, payloadData, offset, 1);
+            offset += numTerms;
+            writer.addDocument(d);
+        }
+        
+        // make sure we create more than one segment to test merging
+        writer.commit();
+        
+        // now we make sure to have different payload lengths at the next skip point
+        for (int i = 0; i < numDocs; i++) {
+            analyzer.setPayloadData(fieldName, payloadData, offset, i);
+            offset += i * numTerms;
+            writer.addDocument(d);
+        }
+        
+        writer.optimize();
+        // flush
+        writer.close();
+        
+        
+        /*
+         * Verify the index
+         * first we test if all payloads are stored correctly
+         */        
+        IndexReader reader = IndexReader.open(dir, true);
+        
+        byte[] verifyPayloadData = new byte[payloadDataLength];
+        offset = 0;
+        TermPositions[] tps = new TermPositions[numTerms];
+        for (int i = 0; i < numTerms; i++) {
+            tps[i] = reader.termPositions(terms[i]);
+        }
+        
+        while (tps[0].next()) {
+            for (int i = 1; i < numTerms; i++) {
+                tps[i].next();
+            }
+            int freq = tps[0].freq();
+
+            for (int i = 0; i < freq; i++) {
+                for (int j = 0; j < numTerms; j++) {
+                    tps[j].nextPosition();
+                    if (tps[j].isPayloadAvailable()) {
+                      tps[j].getPayload(verifyPayloadData, offset);
+                      offset += tps[j].getPayloadLength();
+                    }
+                }
+            }
+        }
+        
+        for (int i = 0; i < numTerms; i++) {
+            tps[i].close();
+        }
+        
+        assertByteArrayEquals(payloadData, verifyPayloadData);
+        
+        /*
+         *  test lazy skipping
+         */        
+        TermPositions tp = reader.termPositions(terms[0]);
+        tp.next();
+        tp.nextPosition();
+        // now we don't read this payload
+        tp.nextPosition();
+        assertEquals("Wrong payload length.", 1, tp.getPayloadLength());
+        byte[] payload = tp.getPayload(null, 0);
+        assertEquals(payload[0], payloadData[numTerms]);
+        tp.nextPosition();
+        
+        // we don't read this payload and skip to a different document
+        tp.skipTo(5);
+        tp.nextPosition();
+        assertEquals("Wrong payload length.", 1, tp.getPayloadLength());
+        payload = tp.getPayload(null, 0);
+        assertEquals(payload[0], payloadData[5 * numTerms]);
+                
+        
+        /*
+         * Test different lengths at skip points
+         */
+        tp.seek(terms[1]);
+        tp.next();
+        tp.nextPosition();
+        assertEquals("Wrong payload length.", 1, tp.getPayloadLength());
+        tp.skipTo(skipInterval - 1);
+        tp.nextPosition();
+        assertEquals("Wrong payload length.", 1, tp.getPayloadLength());
+        tp.skipTo(2 * skipInterval - 1);
+        tp.nextPosition();
+        assertEquals("Wrong payload length.", 1, tp.getPayloadLength());
+        tp.skipTo(3 * skipInterval - 1);
+        tp.nextPosition();
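+        // doc (3*skipInterval - 1) falls into the second batch, where doc d carries payloads
+        // of length d - 2*numDocs, hence the expected length below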
+        assertEquals("Wrong payload length.", 3 * skipInterval - 2 * numDocs - 1, tp.getPayloadLength());
+        
+        /*
+         * Test multiple calls to getPayload()
+         */
+        tp.getPayload(null, 0);
+        try {
+            // it is forbidden to call getPayload() more than once
+            // without calling nextPosition()
+            tp.getPayload(null, 0);
+            fail("Expected exception not thrown");
+        } catch (Exception expected) {
+            // expected exception
+        }
+        
+        reader.close();
+        
+        // test long payload
+        analyzer = new PayloadAnalyzer();
+        writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT,
+            analyzer).setOpenMode(OpenMode.CREATE));
+        String singleTerm = "lucene";
+        
+        d = new Document();
+        d.add(newField(fieldName, singleTerm, Field.Store.NO, Field.Index.ANALYZED));
+        // add a payload whose length is greater than the buffer size of BufferedIndexOutput
+        payloadData = generateRandomData(2000);
+        analyzer.setPayloadData(fieldName, payloadData, 100, 1500);
+        writer.addDocument(d);
+
+        
+        writer.optimize();
+        // flush
+        writer.close();
+        
+        reader = IndexReader.open(dir, true);
+        tp = reader.termPositions(new Term(fieldName, singleTerm));
+        tp.next();
+        tp.nextPosition();
+
+        verifyPayloadData = new byte[tp.getPayloadLength()];
+        tp.getPayload(verifyPayloadData, 0);
+        byte[] portion = new byte[1500];
+        System.arraycopy(payloadData, 100, portion, 0, 1500);
+        
+        assertByteArrayEquals(portion, verifyPayloadData);
+        reader.close();
+        
+    }
+    
+    private void generateRandomData(byte[] data) {
+      // this test needs the random data to be valid unicode
+      String s = _TestUtil.randomFixedByteLengthUnicodeString(random, data.length);
+      byte b[];
+      try {
+        b = s.getBytes("UTF-8");
+      } catch (UnsupportedEncodingException e) {
+        throw new RuntimeException(e);
+      }
+      assert b.length == data.length;
+      System.arraycopy(b, 0, data, 0, b.length);
+    }
+
+    private byte[] generateRandomData(int n) {
+        byte[] data = new byte[n];
+        generateRandomData(data);
+        return data;
+    }
+    
+    private Term[] generateTerms(String fieldName, int n) {
+        int maxDigits = (int) (Math.log(n) / Math.log(10));
+        Term[] terms = new Term[n];
+        StringBuilder sb = new StringBuilder();
+        for (int i = 0; i < n; i++) {
+            sb.setLength(0);
+            sb.append("t");
+            int zeros = maxDigits - (int) (Math.log(i) / Math.log(10));
+            for (int j = 0; j < zeros; j++) {
+                sb.append("0");
+            }
+            sb.append(i);
+            terms[i] = new Term(fieldName, sb.toString());
+        }
+        return terms;
+    }
+
+
+    void assertByteArrayEquals(byte[] b1, byte[] b2) {
+        if (b1.length != b2.length) {
+          fail("Byte arrays have different lengths: " + b1.length + ", " + b2.length);
+        }
+        
+        for (int i = 0; i < b1.length; i++) {
+          if (b1[i] != b2[i]) {
+            fail("Byte arrays different at index " + i + ": " + b1[i] + ", " + b2[i]);
+          }
+        }
+      }    
+    
+    
+    /**
+     * This Analyzer uses a WhitespaceTokenizer and a PayloadFilter.
+     */
+    private static class PayloadAnalyzer extends Analyzer {
+        Map<String,PayloadData> fieldToData = new HashMap<String,PayloadData>();
+        
+        void setPayloadData(String field, byte[] data, int offset, int length) {
+            fieldToData.put(field, new PayloadData(0, data, offset, length));
+        }
+
+        void setPayloadData(String field, int numFieldInstancesToSkip, byte[] data, int offset, int length) {
+            fieldToData.put(field, new PayloadData(numFieldInstancesToSkip, data, offset, length));
+        }
+        
+        @Override
+        public TokenStream tokenStream(String fieldName, Reader reader) {
+            PayloadData payload =  fieldToData.get(fieldName);
+            TokenStream ts = new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader);
+            if (payload != null) {
+                if (payload.numFieldInstancesToSkip == 0) {
+                    ts = new PayloadFilter(ts, payload.data, payload.offset, payload.length);
+                } else {
+                    payload.numFieldInstancesToSkip--;
+                }
+            }
+            return ts;
+        }
+        
+        private static class PayloadData {
+            byte[] data;
+            int offset;
+            int length;
+            int numFieldInstancesToSkip;
+            
+            PayloadData(int skip, byte[] data, int offset, int length) {
+                numFieldInstancesToSkip = skip;
+                this.data = data;
+                this.offset = offset;
+                this.length = length;
+            }
+        }
+    }
+
+    
+    /**
+     * This Filter adds payloads to the tokens.
+     */
+    private static class PayloadFilter extends TokenFilter {
+        private byte[] data;
+        private int length;
+        private int offset;
+        private int startOffset;
+        PayloadAttribute payloadAtt;
+        
+        public PayloadFilter(TokenStream in, byte[] data, int offset, int length) {
+            super(in);
+            this.data = data;
+            this.length = length;
+            this.offset = offset;
+            this.startOffset = offset;
+            payloadAtt = addAttribute(PayloadAttribute.class);
+        }
+        
+        @Override
+        public boolean incrementToken() throws IOException {
+            boolean hasNext = input.incrementToken();
+            if (hasNext) {
+                if (offset + length <= data.length) {
+                    Payload p = new Payload();
+                    payloadAtt.setPayload(p);
+                    p.setData(data, offset, length);
+                    offset += length;                
+                } else {
+                    payloadAtt.setPayload(null);
+                }
+            }
+            
+            return hasNext;
+        }
+
+      @Override
+      public void reset() throws IOException {
+        super.reset();
+        this.offset = startOffset;
+      }
+    }
+    
+    public void testThreadSafety() throws Exception {
+        final int numThreads = 5;
+        final int numDocs = atLeast(50);
+        final ByteArrayPool pool = new ByteArrayPool(numThreads, 5);
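+        // one 5-byte buffer per thread; PoolingPayloadTokenStream.close() returns its buffer,
+        // so the pool is expected to be back at numThreads entries once all ingesters finish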
+        
+        Directory dir = newDirectory();
+        final IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( 
+            TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+        final String field = "test";
+        
+        Thread[] ingesters = new Thread[numThreads];
+        for (int i = 0; i < numThreads; i++) {
+            ingesters[i] = new Thread() {
+                @Override
+                public void run() {
+                    try {
+                        for (int j = 0; j < numDocs; j++) {
+                            Document d = new Document();
+                            d.add(new Field(field, new PoolingPayloadTokenStream(pool)));
+                            writer.addDocument(d);
+                        }
+                    } catch (Exception e) {
+                        e.printStackTrace();
+                        fail(e.toString());
+                    }
+                }
+            };
+            ingesters[i].start();
+        }
+        
+        for (int i = 0; i < numThreads; i++) {
+          ingesters[i].join();
+        }
+        writer.close();
+        IndexReader reader = IndexReader.open(dir, true);
+        TermEnum terms = reader.terms();
+        while (terms.next()) {
+            TermPositions tp = reader.termPositions(terms.term());
+            while(tp.next()) {
+                int freq = tp.freq();
+                for (int i = 0; i < freq; i++) {
+                    tp.nextPosition();
+                    byte payload[] = new byte[5];
+                    tp.getPayload(payload, 0);
+                    assertEquals(terms.term().text, new String(payload, 0, payload.length, "UTF-8"));
+                }
+            }
+            tp.close();
+        }
+        terms.close();
+        reader.close();
+        dir.close();
+        assertEquals(pool.size(), numThreads);
+    }
+    
+    private class PoolingPayloadTokenStream extends TokenStream {
+        private byte[] payload;
+        private boolean first;
+        private ByteArrayPool pool;
+        private String term;
+
+        CharTermAttribute termAtt;
+        PayloadAttribute payloadAtt;
+        
+        PoolingPayloadTokenStream(ByteArrayPool pool) {
+            this.pool = pool;
+            payload = pool.get();
+            generateRandomData(payload);
+            try {
+              term = new String(payload, 0, payload.length, "UTF-8");
+            } catch (UnsupportedEncodingException e) {
+              throw new RuntimeException(e);
+            }
+            first = true;
+            payloadAtt = addAttribute(PayloadAttribute.class);
+            termAtt = addAttribute(CharTermAttribute.class);
+        }
+        
+        @Override
+        public boolean incrementToken() throws IOException {
+            if (!first) return false;
+            first = false;
+            clearAttributes();
+            termAtt.append(term);
+            payloadAtt.setPayload(new Payload(payload));
+            return true;
+        }
+        
+        @Override
+        public void close() throws IOException {
+            pool.release(payload);
+        }
+        
+    }
+    
+    private static class ByteArrayPool {
+        private List<byte[]> pool;
+        
+        ByteArrayPool(int capacity, int size) {
+            pool = new ArrayList<byte[]>();
+            for (int i = 0; i < capacity; i++) {
+                pool.add(new byte[size]);
+            }
+        }
+    
+        synchronized byte[] get() {
+            return pool.remove(0);
+        }
+        
+        synchronized void release(byte[] b) {
+            pool.add(b);
+        }
+        
+        synchronized int size() {
+            return pool.size();
+        }
+    }
+
+  public void testAcrossFields() throws Exception {
+    Directory dir = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random, dir,
+                                                     new MockAnalyzer(random, MockTokenizer.WHITESPACE, true));
+    Document doc = new Document();
+    doc.add(new Field("hasMaybepayload", "here we go", Field.Store.YES, Field.Index.ANALYZED));
+    writer.addDocument(doc);
+    writer.close();
+
+    writer = new RandomIndexWriter(random, dir,
+                                   new MockAnalyzer(random, MockTokenizer.WHITESPACE, true));
+    doc = new Document();
+    doc.add(new Field("hasMaybepayload2", "here we go", Field.Store.YES, Field.Index.ANALYZED));
+    writer.addDocument(doc);
+    writer.addDocument(doc);
+    writer.optimize();
+    writer.close();
+
+    dir.close();
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestPerSegmentDeletes.java b/lucene/backwards/src/test/org/apache/lucene/index/TestPerSegmentDeletes.java
new file mode 100644
index 0000000..57f41ce
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestPerSegmentDeletes.java
@@ -0,0 +1,278 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Random;
+import java.util.Map;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.MockDirectoryWrapper;
+import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.Version;
+
+public class TestPerSegmentDeletes extends LuceneTestCase {
+  public void testDeletes1() throws Exception {
+    //IndexWriter.debug2 = System.out;
+    Directory dir = new MockDirectoryWrapper(new Random(), new RAMDirectory());
+    IndexWriterConfig iwc = new IndexWriterConfig(Version.LUCENE_CURRENT,
+        new MockAnalyzer(random));
+    iwc.setMergeScheduler(new SerialMergeScheduler());
+    iwc.setMaxBufferedDocs(5000);
+    iwc.setRAMBufferSizeMB(100);
+    RangeMergePolicy fsmp = new RangeMergePolicy(false);
+    iwc.setMergePolicy(fsmp);
+    IndexWriter writer = new IndexWriter(dir, iwc);
+    writer.setInfoStream(VERBOSE ? System.out : null);
+    for (int x = 0; x < 5; x++) {
+      writer.addDocument(DocHelper.createDocument(x, "1", 2));
+      //System.out.println("numRamDocs(" + x + ")" + writer.numRamDocs());
+    }
+    //System.out.println("commit1");
+    writer.commit();
+    assertEquals(1, writer.segmentInfos.size());
+    for (int x = 5; x < 10; x++) {
+      writer.addDocument(DocHelper.createDocument(x, "2", 2));
+      //System.out.println("numRamDocs(" + x + ")" + writer.numRamDocs());
+    }
+    //System.out.println("commit2");
+    writer.commit();
+    assertEquals(2, writer.segmentInfos.size());
+
+    for (int x = 10; x < 15; x++) {
+      writer.addDocument(DocHelper.createDocument(x, "3", 2));
+      //System.out.println("numRamDocs(" + x + ")" + writer.numRamDocs());
+    }
+    
+    writer.deleteDocuments(new Term("id", "1"));
+    
+    writer.deleteDocuments(new Term("id", "11"));
+
+    // flushing without applying deletes means 
+    // there will still be deletes in the segment infos
+    writer.flush(false, false);
+    assertTrue(writer.bufferedDeletesStream.any());
+    
+    // getReader flushes pending deletes,
+    // so there should not be any more
+    IndexReader r1 = writer.getReader();
+    assertFalse(writer.bufferedDeletesStream.any());
+    r1.close();
+    
+    // delete id:2 from the first segment
+    // merge segments 0 and 1
+    // which should apply the delete id:2
+    writer.deleteDocuments(new Term("id", "2"));
+    writer.flush(false, false);
+    fsmp.doMerge = true;
+    fsmp.start = 0;
+    fsmp.length = 2;
+    writer.maybeMerge();
+    
+    assertEquals(2, writer.segmentInfos.size());
+    
+    // id:2 shouldn't exist anymore because
+    // the delete was applied during the merge, so the doc is gone
+    IndexReader r2 = writer.getReader();
+    int[] id2docs = toDocsArray(new Term("id", "2"), r2);
+    assertTrue(id2docs == null);
+    r2.close();
+    
+    /**
+    // added docs are in the ram buffer
+    for (int x = 15; x < 20; x++) {
+      writer.addDocument(TestIndexWriterReader.createDocument(x, "4", 2));
+      System.out.println("numRamDocs(" + x + ")" + writer.numRamDocs());
+    }
+    assertTrue(writer.numRamDocs() > 0);
+    // delete from the ram buffer
+    writer.deleteDocuments(new Term("id", Integer.toString(13)));
+    
+    Term id3 = new Term("id", Integer.toString(3));
+    
+    // delete from the 1st segment
+    writer.deleteDocuments(id3);
+    
+    assertTrue(writer.numRamDocs() > 0);
+    
+    //System.out
+    //    .println("segdels1:" + writer.docWriter.deletesToString());
+    
+    //assertTrue(writer.docWriter.segmentDeletes.size() > 0);
+    
+    // we cause a merge to happen
+    fsmp.doMerge = true;
+    fsmp.start = 0;
+    fsmp.length = 2;
+    System.out.println("maybeMerge "+writer.segmentInfos);
+    
+    SegmentInfo info0 = writer.segmentInfos.info(0);
+    SegmentInfo info1 = writer.segmentInfos.info(1);
+    
+    writer.maybeMerge();
+    System.out.println("maybeMerge after "+writer.segmentInfos);
+    // there should be docs in RAM
+    assertTrue(writer.numRamDocs() > 0);
+    
+    // assert we've merged the 1 and 2 segments
+    // and still have a segment leftover == 2
+    assertEquals(2, writer.segmentInfos.size());
+    assertFalse(segThere(info0, writer.segmentInfos));
+    assertFalse(segThere(info1, writer.segmentInfos));
+    
+    //System.out.println("segdels2:" + writer.docWriter.deletesToString());
+    
+    //assertTrue(writer.docWriter.segmentDeletes.size() > 0);
+    
+    IndexReader r = writer.getReader();
+    IndexReader r1 = r.getSequentialSubReaders()[0];
+    printDelDocs(r1.getDeletedDocs());
+    int[] docs = toDocsArray(id3, null, r);
+    System.out.println("id3 docs:"+Arrays.toString(docs));
+    // there shouldn't be any docs for id:3
+    assertTrue(docs == null);
+    r.close();
+    
+    part2(writer, fsmp);
+    **/
+    // System.out.println("segdels2:"+writer.docWriter.segmentDeletes.toString());
+    //System.out.println("close");
+    writer.close();
+    dir.close();
+  }
+  
+  /**
+  static boolean hasPendingDeletes(SegmentInfos infos) {
+    for (SegmentInfo info : infos) {
+      if (info.deletes.any()) {
+        return true;
+      }
+    }
+    return false;
+  }
+  **/
+  void part2(IndexWriter writer, RangeMergePolicy fsmp) throws Exception {
+    for (int x = 20; x < 25; x++) {
+      writer.addDocument(DocHelper.createDocument(x, "5", 2));
+      //System.out.println("numRamDocs(" + x + ")" + writer.numRamDocs());
+    }
+    writer.flush(false, false);
+    for (int x = 25; x < 30; x++) {
+      writer.addDocument(DocHelper.createDocument(x, "5", 2));
+      //System.out.println("numRamDocs(" + x + ")" + writer.numRamDocs());
+    }
+    writer.flush(false, false);
+    
+    //System.out.println("infos3:"+writer.segmentInfos);
+    
+    Term delterm = new Term("id", "8");
+    writer.deleteDocuments(delterm);
+    //System.out.println("segdels3:" + writer.docWriter.deletesToString());
+    
+    fsmp.doMerge = true;
+    fsmp.start = 1;
+    fsmp.length = 2;
+    writer.maybeMerge();
+    
+    // info1, the newly created segment from the merge, should have
+    // no deletes because they were applied during the merge
+    //SegmentInfo info1 = writer.segmentInfos.info(1);
+    //assertFalse(exists(info1, writer.docWriter.segmentDeletes));
+    
+    //System.out.println("infos4:"+writer.segmentInfos);
+    //System.out.println("segdels4:" + writer.docWriter.deletesToString());
+  }
+  
+  boolean segThere(SegmentInfo info, SegmentInfos infos) {
+    for (SegmentInfo si : infos) {
+      if (si.name.equals(info.name)) return true; 
+    }
+    return false;
+  }
+  
+  public static int[] toDocsArray(Term term, IndexReader reader)
+      throws IOException {
+    TermDocs termDocs = reader.termDocs();
+    termDocs.seek(term);
+    return toArray(termDocs);
+  }
+  
+  public static int[] toArray(TermDocs termDocs) throws IOException {
+    List<Integer> docs = new ArrayList<Integer>();
+    while (termDocs.next()) {
+      docs.add(termDocs.doc());
+    }
+    if (docs.size() == 0) {
+      return null;
+    } else {
+      return ArrayUtil.toIntArray(docs);
+    }
+  }
+  
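+  /**
+   * Test-only merge policy: when doMerge is set, it returns a single merge over the
+   * segment range [start, start + length) and then clears the flag; otherwise it never merges.
+   */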
+  public class RangeMergePolicy extends MergePolicy {
+    boolean doMerge = false;
+    int start;
+    int length;
+    
+    private final boolean useCompoundFile;
+    
+    private RangeMergePolicy(boolean useCompoundFile) {
+      this.useCompoundFile = useCompoundFile;
+    }
+    
+    @Override
+    public void close() {}
+    
+    @Override
+    public MergeSpecification findMerges(SegmentInfos segmentInfos)
+        throws CorruptIndexException, IOException {
+      MergeSpecification ms = new MergeSpecification();
+      if (doMerge) {
+        OneMerge om = new OneMerge(segmentInfos.asList().subList(start, start + length));
+        ms.add(om);
+        doMerge = false;
+        return ms;
+      }
+      return null;
+    }
+    
+    @Override
+    public MergeSpecification findMergesForOptimize(SegmentInfos segmentInfos,
+        int maxSegmentCount, Map<SegmentInfo,Boolean> segmentsToOptimize)
+        throws CorruptIndexException, IOException {
+      return null;
+    }
+    
+    @Override
+    public MergeSpecification findMergesToExpungeDeletes(
+        SegmentInfos segmentInfos) throws CorruptIndexException, IOException {
+      return null;
+    }
+    
+    @Override
+    public boolean useCompoundFile(SegmentInfos segments, SegmentInfo newSegment) {
+      return useCompoundFile;
+    }
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestPersistentSnapshotDeletionPolicy.java b/lucene/backwards/src/test/org/apache/lucene/index/TestPersistentSnapshotDeletionPolicy.java
new file mode 100644
index 0000000..b18acf2
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestPersistentSnapshotDeletionPolicy.java
@@ -0,0 +1,191 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.LockObtainFailedException;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+public class TestPersistentSnapshotDeletionPolicy extends TestSnapshotDeletionPolicy {
+
+  // Keep it a class member so that getDeletionPolicy can use it
+  private Directory snapshotDir;
+  
+  // so we can close it if called by SDP tests
+  private PersistentSnapshotDeletionPolicy psdp;
+  
+  @Before
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    snapshotDir = newDirectory();
+  }
+  
+  @After
+  @Override
+  public void tearDown() throws Exception {
+    if (psdp != null) psdp.close();
+    snapshotDir.close();
+    super.tearDown();
+  }
+  
+  @Override
+  protected SnapshotDeletionPolicy getDeletionPolicy() throws IOException {
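+    // close any previous policy and recreate the snapshots Directory, so each call starts from a clean slate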
+    if (psdp != null) psdp.close();
+    snapshotDir.close();
+    snapshotDir = newDirectory();
+    return psdp = new PersistentSnapshotDeletionPolicy(
+        new KeepOnlyLastCommitDeletionPolicy(), snapshotDir, OpenMode.CREATE,
+        TEST_VERSION_CURRENT);
+  }
+
+  @Override
+  protected SnapshotDeletionPolicy getDeletionPolicy(Map<String, String> snapshots) throws IOException {
+    SnapshotDeletionPolicy sdp = getDeletionPolicy();
+    if (snapshots != null) {
+      for (Entry<String, String> e: snapshots.entrySet()) {
+        sdp.registerSnapshotInfo(e.getKey(), e.getValue(), null);
+      }
+    }
+    return sdp;
+  }
+
+  @Override
+  @Test
+  public void testExistingSnapshots() throws Exception {
+    int numSnapshots = 3;
+    Directory dir = newDirectory();
+    PersistentSnapshotDeletionPolicy psdp = (PersistentSnapshotDeletionPolicy) getDeletionPolicy();
+    IndexWriter writer = new IndexWriter(dir, getConfig(random, psdp));
+    prepareIndexAndSnapshots(psdp, writer, numSnapshots, "snapshot");
+    writer.close();
+    psdp.close();
+
+    // Re-initialize and verify snapshots were persisted
+    psdp = new PersistentSnapshotDeletionPolicy(
+        new KeepOnlyLastCommitDeletionPolicy(), snapshotDir, OpenMode.APPEND,
+        TEST_VERSION_CURRENT);
+    new IndexWriter(dir, getConfig(random, psdp)).close();
+
+    assertSnapshotExists(dir, psdp, numSnapshots);
+    assertEquals(numSnapshots, psdp.getSnapshots().size());
+    psdp.close();
+    dir.close();
+  }
+
+  @Test(expected=IllegalArgumentException.class)
+  public void testIllegalSnapshotId() throws Exception {
+    getDeletionPolicy().snapshot("$SNAPSHOTS_DOC$");
+  }
+  
+  @Test
+  public void testInvalidSnapshotInfos() throws Exception {
+    // Add the correct number of documents (1), but without snapshot information
+    IndexWriter writer = new IndexWriter(snapshotDir, getConfig(random, null));
+    writer.addDocument(new Document());
+    writer.close();
+    try {
+      new PersistentSnapshotDeletionPolicy(
+          new KeepOnlyLastCommitDeletionPolicy(), snapshotDir, OpenMode.APPEND,
+          TEST_VERSION_CURRENT);
+      fail("should not have succeeded to read from an invalid Directory");
+    } catch (IllegalStateException e) {
+    }
+  }
+
+  @Test
+  public void testNoSnapshotInfos() throws Exception {
+    // Initialize an empty index in snapshotDir - PSDP should initialize successfully.
+    new IndexWriter(snapshotDir, getConfig(random, null)).close();
+    new PersistentSnapshotDeletionPolicy(
+        new KeepOnlyLastCommitDeletionPolicy(), snapshotDir, OpenMode.APPEND,
+        TEST_VERSION_CURRENT).close();
+  }
+
+  @Test(expected=IllegalStateException.class)
+  public void testTooManySnapshotInfos() throws Exception {
+    // Write two documents to the snapshots directory - illegal.
+    IndexWriter writer = new IndexWriter(snapshotDir, getConfig(random, null));
+    writer.addDocument(new Document());
+    writer.addDocument(new Document());
+    writer.close();
+    
+    new PersistentSnapshotDeletionPolicy(
+        new KeepOnlyLastCommitDeletionPolicy(), snapshotDir, OpenMode.APPEND,
+        TEST_VERSION_CURRENT).close();
+    fail("should not have succeeded to open an invalid directory");
+  }
+
+  @Test
+  public void testSnapshotRelease() throws Exception {
+    Directory dir = newDirectory();
+    PersistentSnapshotDeletionPolicy psdp = (PersistentSnapshotDeletionPolicy) getDeletionPolicy();
+    IndexWriter writer = new IndexWriter(dir, getConfig(random, psdp));
+    prepareIndexAndSnapshots(psdp, writer, 1, "snapshot");
+    writer.close();
+
+    psdp.release("snapshot0");
+    psdp.close();
+
+    psdp = new PersistentSnapshotDeletionPolicy(
+        new KeepOnlyLastCommitDeletionPolicy(), snapshotDir, OpenMode.APPEND,
+        TEST_VERSION_CURRENT);
+    assertEquals("Should have no snapshots !", 0, psdp.getSnapshots().size());
+    psdp.close();
+    dir.close();
+  }
+
+  @Test
+  public void testStaticRead() throws Exception {
+    // While PSDP is open, it keeps a lock on the snapshots directory and thus
+    // prevents reading the snapshots information. This test checks that the 
+    // static read method works.
+    int numSnapshots = 1;
+    Directory dir = newDirectory();
+    PersistentSnapshotDeletionPolicy psdp = (PersistentSnapshotDeletionPolicy) getDeletionPolicy();
+    IndexWriter writer = new IndexWriter(dir, getConfig(random, psdp));
+    prepareIndexAndSnapshots(psdp, writer, numSnapshots, "snapshot");
+    writer.close();
+    dir.close();
+    
+    try {
+      // This should fail, since the snapshots directory is locked - we didn't close it !
+      new PersistentSnapshotDeletionPolicy(
+          new KeepOnlyLastCommitDeletionPolicy(), snapshotDir, OpenMode.APPEND,
+          TEST_VERSION_CURRENT);
+      fail("should not have reached here - the snapshots directory should be locked!");
+    } catch (LockObtainFailedException e) {
+      // expected
+    } finally {
+      psdp.close();
+    }
+    
+    // Reading the snapshots info should succeed though
+    Map<String, String> snapshots = PersistentSnapshotDeletionPolicy.readSnapshotsInfo(snapshotDir);
+    assertEquals("expected " + numSnapshots + " snapshots, got " + snapshots.size(), numSnapshots, snapshots.size());
+  }
+  
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestPositionBasedTermVectorMapper.java b/lucene/backwards/src/test/org/apache/lucene/index/TestPositionBasedTermVectorMapper.java
new file mode 100644
index 0000000..814f4eb
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestPositionBasedTermVectorMapper.java
@@ -0,0 +1,99 @@
+package org.apache.lucene.index;
+/**
+ * Copyright 2005 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.util.LuceneTestCase;
+
+import java.io.IOException;
+import java.util.BitSet;
+import java.util.Map;
+
+public class TestPositionBasedTermVectorMapper extends LuceneTestCase {
+  protected String[] tokens;
+  protected int[][] thePositions;
+  protected TermVectorOffsetInfo[][] offsets;
+  protected int numPositions;
+
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    tokens = new String[]{"here", "is", "some", "text", "to", "test", "extra"};
+    thePositions = new int[tokens.length][];
+    offsets = new TermVectorOffsetInfo[tokens.length][];
+    numPositions = 0;
+    //save off the last one so we can add it with the same positions as some of the others, but in a predictable way
+    for (int i = 0; i < tokens.length - 1; i++)
+    {
+      thePositions[i] = new int[2 * i + 1];//give 'em all some positions
+      for (int j = 0; j < thePositions[i].length; j++)
+      {
+        thePositions[i][j] = numPositions++;
+      }
+      offsets[i] = new TermVectorOffsetInfo[thePositions[i].length];
+      for (int j = 0; j < offsets[i].length; j++) {
+        offsets[i][j] = new TermVectorOffsetInfo(j, j + 1);//the actual value here doesn't much matter
+      }
+    }
+    thePositions[tokens.length - 1] = new int[1];
+    thePositions[tokens.length - 1][0] = 0;//put this at the same position as "here"
+    offsets[tokens.length - 1] = new TermVectorOffsetInfo[1];
+    offsets[tokens.length - 1][0] = new TermVectorOffsetInfo(0, 1);
+  }
+
+  public void test() throws IOException {
+    PositionBasedTermVectorMapper mapper = new PositionBasedTermVectorMapper();
+    
+    mapper.setExpectations("test", tokens.length, true, true);
+    //Test single position
+    for (int i = 0; i < tokens.length; i++) {
+      String token = tokens[i];
+      mapper.map(token, 1, null, thePositions[i]);
+
+    }
+    Map<String,Map<Integer,PositionBasedTermVectorMapper.TVPositionInfo>> map = mapper.getFieldToTerms();
+    assertTrue("map is null and it shouldn't be", map != null);
+    assertTrue("map Size: " + map.size() + " is not: " + 1, map.size() == 1);
+    Map<Integer,PositionBasedTermVectorMapper.TVPositionInfo> positions = map.get("test");
+    assertTrue("thePositions is null and it shouldn't be", positions != null);
+    
+    assertTrue("thePositions Size: " + positions.size() + " is not: " + numPositions, positions.size() == numPositions);
+    BitSet bits = new BitSet(numPositions);
+    for (Map.Entry<Integer,PositionBasedTermVectorMapper.TVPositionInfo> entry : positions.entrySet()) {
+    
+      PositionBasedTermVectorMapper.TVPositionInfo info = entry.getValue();
+      assertTrue("info is null and it shouldn't be", info != null);
+      int pos = entry.getKey().intValue();
+      bits.set(pos);
+      assertTrue(info.getPosition() + " does not equal: " + pos, info.getPosition() == pos);
+      assertTrue("info.getOffsets() is null and it shouldn't be", info.getOffsets() != null);
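+      // position 0 is shared by "here" and the extra last token added in setUp, so it carries two terms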
+      if (pos == 0)
+      {
+        assertTrue("info.getTerms() Size: " + info.getTerms().size() + " is not: " + 2, info.getTerms().size() == 2);//need a test for multiple terms at one pos
+        assertTrue("info.getOffsets() Size: " + info.getOffsets().size() + " is not: " + 2, info.getOffsets().size() == 2);
+      }
+      else
+      {
+        assertTrue("info.getTerms() Size: " + info.getTerms().size() + " is not: " + 1, info.getTerms().size() == 1);//need a test for multiple terms at one pos
+        assertTrue("info.getOffsets() Size: " + info.getOffsets().size() + " is not: " + 1, info.getOffsets().size() == 1);
+      }
+    }
+    assertTrue("Bits are not all on", bits.cardinality() == numPositions);
+  }
+
+
+
+  
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestPrefixCodedTerms.java b/lucene/backwards/src/test/org/apache/lucene/index/TestPrefixCodedTerms.java
new file mode 100644
index 0000000..9578436
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestPrefixCodedTerms.java
@@ -0,0 +1,136 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Set;
+import java.util.TreeSet;
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util._TestUtil;
+
+public class TestPrefixCodedTerms extends LuceneTestCase {
+  
+  public void testEmpty() {
+    PrefixCodedTerms.Builder b = new PrefixCodedTerms.Builder();
+    PrefixCodedTerms pb = b.finish();
+    assertFalse(pb.iterator().hasNext());
+  }
+  
+  public void testOne() {
+    Term term = new Term("foo", "bogus");
+    PrefixCodedTerms.Builder b = new PrefixCodedTerms.Builder();
+    b.add(term);
+    PrefixCodedTerms pb = b.finish();
+    Iterator<Term> iterator = pb.iterator();
+    assertTrue(iterator.hasNext());
+    assertEquals(term, iterator.next());
+  }
+  
+  public void testRandom() {
+    Set<Term> terms = new TreeSet<Term>();
+    int nterms = atLeast(10000);
+    for (int i = 0; i < nterms; i++) {
+      Term term = new Term(_TestUtil.randomUnicodeString(random, 2), _TestUtil.randomUnicodeString(random));
+      terms.add(term);
+    }    
+    
+    PrefixCodedTerms.Builder b = new PrefixCodedTerms.Builder();
+    for (Term ref: terms) {
+      b.add(ref);
+    }
+    PrefixCodedTerms pb = b.finish();
+    
+    Iterator<Term> expected = terms.iterator();
+    for (Term t : pb) {
+      assertTrue(expected.hasNext());
+      assertEquals(expected.next(), t);
+    }
+    assertFalse(expected.hasNext());
+  }
+  
+  public void testMergeEmpty() {
+    List<Iterator<Term>> subs = Collections.emptyList();
+    assertFalse(CoalescedDeletes.mergedIterator(subs).hasNext());
+
+    subs = new ArrayList<Iterator<Term>>();
+    subs.add(new PrefixCodedTerms.Builder().finish().iterator());
+    subs.add(new PrefixCodedTerms.Builder().finish().iterator());
+    Iterator<Term> merged = CoalescedDeletes.mergedIterator(subs);
+    assertFalse(merged.hasNext());
+  }
+
+  public void testMergeOne() {
+    Term t1 = new Term("foo", "a");
+    PrefixCodedTerms.Builder b1 = new PrefixCodedTerms.Builder();
+    b1.add(t1);
+    PrefixCodedTerms pb1 = b1.finish();
+    
+    Term t2 = new Term("foo", "b");
+    PrefixCodedTerms.Builder b2 = new PrefixCodedTerms.Builder();
+    b2.add(t2);
+    PrefixCodedTerms pb2 = b2.finish();
+    
+    List<Iterator<Term>> subs = new ArrayList<Iterator<Term>>();
+    subs.add(pb1.iterator());
+    subs.add(pb2.iterator());
+    
+    Iterator<Term> merged = CoalescedDeletes.mergedIterator(subs);
+    assertTrue(merged.hasNext());
+    assertEquals(t1, merged.next());
+    assertTrue(merged.hasNext());
+    assertEquals(t2, merged.next());
+  }
+
+  public void testMergeRandom() {
+    PrefixCodedTerms pb[] = new PrefixCodedTerms[_TestUtil.nextInt(random, 2, 10)];
+    Set<Term> superSet = new TreeSet<Term>();
+    
+    for (int i = 0; i < pb.length; i++) {
+      Set<Term> terms = new TreeSet<Term>();
+      int nterms = _TestUtil.nextInt(random, 0, 10000);
+      for (int j = 0; j < nterms; j++) {
+        Term term = new Term(_TestUtil.randomUnicodeString(random, 2), _TestUtil.randomUnicodeString(random, 4));
+        terms.add(term);
+      }
+      superSet.addAll(terms);
+    
+      PrefixCodedTerms.Builder b = new PrefixCodedTerms.Builder();
+      for (Term ref: terms) {
+        b.add(ref);
+      }
+      pb[i] = b.finish();
+    }
+    
+    List<Iterator<Term>> subs = new ArrayList<Iterator<Term>>();
+    for (int i = 0; i < pb.length; i++) {
+      subs.add(pb[i].iterator());
+    }
+    
+    Iterator<Term> expected = superSet.iterator();
+    Iterator<Term> actual = CoalescedDeletes.mergedIterator(subs);
+    while (actual.hasNext()) {
+      assertTrue(expected.hasNext());
+      assertEquals(expected.next(), actual.next());
+    }
+    assertFalse(expected.hasNext());
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestRollback.java b/lucene/backwards/src/test/org/apache/lucene/index/TestRollback.java
new file mode 100644
index 0000000..eb3ea10
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestRollback.java
@@ -0,0 +1,61 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.WhitespaceAnalyzer;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field.Index;
+import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+
+public class TestRollback extends LuceneTestCase {
+
+  // LUCENE-2536
+  public void testRollbackIntegrityWithBufferFlush() throws Exception {
+    Directory dir = newDirectory();
+    RandomIndexWriter rw = new RandomIndexWriter(random, dir);
+
+    for (int i = 0; i < 5; i++) {
+      Document doc = new Document();
+      doc.add(newField("pk", Integer.toString(i), Store.YES, Index.ANALYZED_NO_NORMS));
+      rw.addDocument(doc);
+    }
+    rw.close();
+
+    // If buffer size is small enough to cause a flush, errors ensue...
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(2).setOpenMode(IndexWriterConfig.OpenMode.APPEND));
+
+    Term pkTerm = new Term("pk", "");
+    for (int i = 0; i < 3; i++) {
+      Document doc = new Document();
+      String value = Integer.toString(i);
+      doc.add(newField("pk", value, Store.YES, Index.ANALYZED_NO_NORMS));
+      doc.add(newField("text", "foo", Store.YES, Index.ANALYZED_NO_NORMS));
+      w.updateDocument(pkTerm.createTerm(value), doc);
+    }
+    w.rollback();
+
+    IndexReader r = IndexReader.open(dir, true);
+    assertEquals("index should contain same number of docs post rollback", 5, r.numDocs());
+    r.close();
+    dir.close();
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestRollingUpdates.java b/lucene/backwards/src/test/org/apache/lucene/index/TestRollingUpdates.java
new file mode 100644
index 0000000..69e9ddb
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestRollingUpdates.java
@@ -0,0 +1,145 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.*;
+import org.apache.lucene.store.*;
+import org.apache.lucene.util.*;
+import org.junit.Test;
+
+public class TestRollingUpdates extends LuceneTestCase {
+
+  // Just updates the same set of N docs over and over, to
+  // stress out deletions
+
+  @Test
+  public void testRollingUpdates() throws Exception {
+    final Directory dir = newDirectory();
+
+    final LineFileDocs docs = new LineFileDocs(random);
+
+    final IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    final int SIZE = atLeast(20);
+    int id = 0;
+    IndexReader r = null;
+    final int numUpdates = (int) (SIZE * (2+random.nextDouble()));
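+    // between 2x and 3x SIZE updates; ids are assigned round-robin, so every id gets updated at least twice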
+    for(int docIter=0;docIter<numUpdates;docIter++) {
+      final Document doc = docs.nextDoc();
+      final String myID = ""+id;
+      if (id == SIZE-1) {
+        id = 0;
+      } else {
+        id++;
+      }
+      doc.getField("docid").setValue(myID);
+      w.updateDocument(new Term("docid", myID), doc);
+
+      if (docIter >= SIZE && random.nextInt(50) == 17) {
+        if (r != null) {
+          r.close();
+        }
+        final boolean applyDeletions = random.nextBoolean();
+        r = w.getReader(applyDeletions);
+        assertTrue("applyDeletions=" + applyDeletions + " r.numDocs()=" + r.numDocs() + " vs SIZE=" + SIZE, !applyDeletions || r.numDocs() == SIZE);
+      }
+    }
+
+    if (r != null) {
+      r.close();
+    }
+
+    w.commit();
+    assertEquals(SIZE, w.numDocs());
+
+    w.close();
+    docs.close();
+    
+    dir.close();
+  }
+  
+  
+  public void testUpdateSameDoc() throws Exception {
+    final Directory dir = newDirectory();
+
+    final LineFileDocs docs = new LineFileDocs(random);
+    for (int r = 0; r < 3; r++) {
+      final IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(
+          TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(2));
+      final int numUpdates = atLeast(20);
+      int numThreads = _TestUtil.nextInt(random, 2, 6);
+      IndexingThread[] threads = new IndexingThread[numThreads];
+      for (int i = 0; i < numThreads; i++) {
+        threads[i] = new IndexingThread(docs, w, numUpdates);
+        threads[i].start();
+      }
+
+      for (int i = 0; i < numThreads; i++) {
+        threads[i].join();
+      }
+
+      w.close();
+    }
+
+    IndexReader open = IndexReader.open(dir);
+    assertEquals(1, open.numDocs());
+    open.close();
+    docs.close();
+    dir.close();
+  }
+  
+  static class IndexingThread extends Thread {
+    final LineFileDocs docs;
+    final IndexWriter writer;
+    final int num;
+    
+    public IndexingThread(LineFileDocs docs, IndexWriter writer, int num) {
+      super();
+      this.docs = docs;
+      this.writer = writer;
+      this.num = num;
+    }
+
+    public void run() {
+      try {
+        IndexReader open = null;
+        for (int i = 0; i < num; i++) {
+          Document doc = new Document();// docs.nextDoc();
+          doc.add(newField("id", "test", Field.Index.NOT_ANALYZED));
+          writer.updateDocument(new Term("id", "test"), doc);
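+          // Roughly a third of the time, refresh an NRT reader and check that only one live doc remains;
+          // reopen() returns a new reader only if the index changed, in which case the old one is closed.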
+          if (random.nextInt(3) == 0) {
+            if (open == null) {
+              open = IndexReader.open(writer, true);
+            }
+            IndexReader reader = open.reopen();
+            if (reader != open) {
+              open.close();
+              open = reader;
+            }
+            assertEquals("iter: " + i + " numDocs: "+ open.numDocs() + " del: " + open.numDeletedDocs() + " max: " + open.maxDoc(), 1, open.numDocs());
+          }
+        }
+        if (open != null) {
+          open.close();
+        }
+      } catch (Exception e) {
+        throw new RuntimeException(e);
+      }
+    }
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestSameTokenSamePosition.java b/lucene/backwards/src/test/org/apache/lucene/index/TestSameTokenSamePosition.java
new file mode 100644
index 0000000..8097c13
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestSameTokenSamePosition.java
@@ -0,0 +1,105 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.io.Reader;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.ReusableAnalyzerBase;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
+import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+
+public class TestSameTokenSamePosition extends LuceneTestCase {
+
+  /**
+   * Attempt to reproduce an assertion error that happens
+   * only with the trunk version around April 2011.
+   */
+  public void test() throws Exception {
+    Directory dir = newDirectory();
+    RandomIndexWriter riw = new RandomIndexWriter(random, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new BugReproAnalyzer()));
+    Document doc = new Document();
+    doc.add(new Field("eng", "Six drunken" /*This shouldn't matter. */, 
+                      Field.Store.YES, Field.Index.ANALYZED));
+    riw.addDocument(doc);
+    riw.close();
+    dir.close();
+  }
+  
+  /**
+   * Same as the above, but with more docs
+   */
+  public void testMoreDocs() throws Exception {
+    Directory dir = newDirectory();
+    RandomIndexWriter riw = new RandomIndexWriter(random, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new BugReproAnalyzer()));
+    Document doc = new Document();
+    doc.add(new Field("eng", "Six drunken" /*This shouldn't matter. */, 
+                      Field.Store.YES, Field.Index.ANALYZED));
+    for (int i = 0; i < 100; i++) {
+      riw.addDocument(doc);
+    }
+    riw.close();
+    dir.close();
+  }
+}
+
+final class BugReproAnalyzer extends Analyzer{
+  @Override
+  public TokenStream tokenStream(String arg0, Reader arg1) {
+    return new BugReproAnalyzerTokenizer();
+  }
+}
+
+final class BugReproAnalyzerTokenizer extends Tokenizer {
+  private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
+  private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
+  private final PositionIncrementAttribute posIncAtt = addAttribute(PositionIncrementAttribute.class);
+  private final int tokenCount = 4;
+  private int nextTokenIndex = 0;
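+  // Two terms ("six", "drunken"), each emitted twice: the duplicate copy uses a position
+  // increment of 0, so it lands on the same position and offsets as the first copy.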
+  private final String terms[] = new String[]{"six", "six", "drunken", "drunken"};
+  private final int starts[] = new int[]{0, 0, 4, 4};
+  private final int ends[] = new int[]{3, 3, 11, 11};
+  private final int incs[] = new int[]{1, 0, 1, 0};
+
+  @Override
+  public boolean incrementToken() throws IOException {
+    if (nextTokenIndex < tokenCount) {
+      termAtt.setEmpty().append(terms[nextTokenIndex]);
+      offsetAtt.setOffset(starts[nextTokenIndex], ends[nextTokenIndex]);
+      posIncAtt.setPositionIncrement(incs[nextTokenIndex]);
+      nextTokenIndex++;
+      return true;			
+    } else {
+      return false;
+    }
+  }
+
+  @Override
+  public void reset() throws IOException {
+    super.reset();
+    this.nextTokenIndex = 0;
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestSegmentInfo.java b/lucene/backwards/src/test/org/apache/lucene/index/TestSegmentInfo.java
new file mode 100644
index 0000000..13cf539
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestSegmentInfo.java
@@ -0,0 +1,90 @@
+package org.apache.lucene.index;
+
+import java.io.IOException;
+import java.util.Iterator;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.Field.Index;
+import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.document.Field.TermVector;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util._TestUtil;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class TestSegmentInfo extends LuceneTestCase {
+
+  public void testSizeInBytesCache() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy());
+    IndexWriter writer = new IndexWriter(dir, conf);
+    Document doc = new Document();
+    doc.add(new Field("a", "value", Store.YES, Index.ANALYZED));
+    writer.addDocument(doc);
+    writer.close();
+    
+    SegmentInfos sis = new SegmentInfos();
+    sis.read(dir);
+    SegmentInfo si = sis.info(0);
+    long sizeInBytesNoStore = si.sizeInBytes(false);
+    long sizeInBytesWithStore = si.sizeInBytes(true);
+    assertTrue("sizeInBytesNoStore=" + sizeInBytesNoStore + " sizeInBytesWithStore=" + sizeInBytesWithStore, sizeInBytesWithStore > sizeInBytesNoStore);
+    dir.close();
+  }
+  
+  // LUCENE-2584: calling files() by multiple threads could lead to ConcurrentModificationException
+  public void testFilesConcurrency() throws Exception {
+    Directory dir = newDirectory();
+    // Create many files
+    IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
+    IndexWriter writer = new IndexWriter(dir, conf);
+    Document doc = new Document();
+    doc.add(new Field("a", "b", Store.YES, Index.ANALYZED, TermVector.YES));
+    writer.addDocument(doc);
+    writer.close();
+    
+    SegmentInfos sis = new SegmentInfos();
+    sis.read(dir);
+    final SegmentInfo si = sis.info(0);
+    Thread[] threads = new Thread[_TestUtil.nextInt(random, 2, 5)];
+    for (int i = 0; i < threads.length; i++) {
+      threads[i] = new Thread() {
+        @Override
+        public void run() {
+          try {
+            // Verify that files() does not throw an exception and that the
+            // iteration afterwards succeeds.
+            Iterator<String> iter = si.files().iterator();
+            while (iter.hasNext()) iter.next();
+          } catch (IOException e) {
+            throw new RuntimeException(e);
+          }
+        }
+      };
+    }
+    
+    for (Thread t : threads) t.start();
+    for (Thread t : threads) t.join();
+    
+    dir.close();
+  }
+  
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestSegmentMerger.java b/lucene/backwards/src/test/org/apache/lucene/index/TestSegmentMerger.java
new file mode 100644
index 0000000..dc234c8
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestSegmentMerger.java
@@ -0,0 +1,178 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.BufferedIndexInput;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.Field.Index;
+import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+
+import java.io.IOException;
+import java.util.Collection;
+
+public class TestSegmentMerger extends LuceneTestCase {
+  //The variables for the new merged segment
+  private Directory mergedDir;
+  private String mergedSegment = "test";
+  //First segment to be merged
+  private Directory merge1Dir;
+  private Document doc1 = new Document();
+  private SegmentReader reader1 = null;
+  //Second segment to be merged
+  private Directory merge2Dir;
+  private Document doc2 = new Document();
+  private SegmentReader reader2 = null;
+
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    mergedDir = newDirectory();
+    merge1Dir = newDirectory();
+    merge2Dir = newDirectory();
+    DocHelper.setupDoc(doc1);
+    SegmentInfo info1 = DocHelper.writeDoc(random, merge1Dir, doc1);
+    DocHelper.setupDoc(doc2);
+    SegmentInfo info2 = DocHelper.writeDoc(random, merge2Dir, doc2);
+    reader1 = SegmentReader.get(true, info1, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
+    reader2 = SegmentReader.get(true, info2, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
+  }
+  
+  @Override
+  public void tearDown() throws Exception {
+    reader1.close();
+    reader2.close();
+    mergedDir.close();
+    merge1Dir.close();
+    merge2Dir.close();
+    super.tearDown();
+  }
+
+  public void test() {
+    assertTrue(mergedDir != null);
+    assertTrue(merge1Dir != null);
+    assertTrue(merge2Dir != null);
+    assertTrue(reader1 != null);
+    assertTrue(reader2 != null);
+  }
+  
+  public void testMerge() throws IOException {                             
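+    // Merge the two single-doc segments into the new "test" segment and check that stored
+    // fields, postings, term vectors and norms survive the merge.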
+    SegmentMerger merger = new SegmentMerger(mergedDir, IndexWriterConfig.DEFAULT_TERM_INDEX_INTERVAL, mergedSegment, null, null, new FieldInfos());
+    merger.add(reader1);
+    merger.add(reader2);
+    int docsMerged = merger.merge();
+    assertTrue(docsMerged == 2);
+    //Should be able to open a new SegmentReader against the new directory
+    SegmentReader mergedReader = SegmentReader.get(false, mergedDir,
+                                                   new SegmentInfo(mergedSegment, docsMerged, mergedDir, false, true,
+                                                                   merger.fieldInfos().hasProx(),
+                                                                   merger.fieldInfos().hasVectors()),
+                                                   BufferedIndexInput.BUFFER_SIZE, true, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
+
+    assertTrue(mergedReader != null);
+    assertTrue(mergedReader.numDocs() == 2);
+    Document newDoc1 = mergedReader.document(0);
+    assertTrue(newDoc1 != null);
+    //There are 2 unstored fields on the document
+    assertTrue(DocHelper.numFields(newDoc1) == DocHelper.numFields(doc1) - DocHelper.unstored.size());
+    Document newDoc2 = mergedReader.document(1);
+    assertTrue(newDoc2 != null);
+    assertTrue(DocHelper.numFields(newDoc2) == DocHelper.numFields(doc2) - DocHelper.unstored.size());
+    
+    TermDocs termDocs = mergedReader.termDocs(new Term(DocHelper.TEXT_FIELD_2_KEY, "field"));
+    assertTrue(termDocs != null);
+    assertTrue(termDocs.next() == true);
+    
+    Collection<String> stored = mergedReader.getFieldNames(IndexReader.FieldOption.INDEXED_WITH_TERMVECTOR);
+    assertTrue(stored != null);
+    //System.out.println("stored size: " + stored.size());
+    assertTrue("We do not have 3 fields that were indexed with term vector",stored.size() == 3);
+    
+    TermFreqVector vector = mergedReader.getTermFreqVector(0, DocHelper.TEXT_FIELD_2_KEY);
+    assertTrue(vector != null);
+    String [] terms = vector.getTerms();
+    assertTrue(terms != null);
+    //System.out.println("Terms size: " + terms.length);
+    assertTrue(terms.length == 3);
+    int [] freqs = vector.getTermFrequencies();
+    assertTrue(freqs != null);
+    //System.out.println("Freqs size: " + freqs.length);
+    assertTrue(vector instanceof TermPositionVector == true);
+    
+    for (int i = 0; i < terms.length; i++) {
+      String term = terms[i];
+      int freq = freqs[i];
+      //System.out.println("Term: " + term + " Freq: " + freq);
+      assertTrue(DocHelper.FIELD_2_TEXT.indexOf(term) != -1);
+      assertTrue(DocHelper.FIELD_2_FREQS[i] == freq);
+    }
+
+    TestSegmentReader.checkNorms(mergedReader);
+    mergedReader.close();
+  }
+  
+  // LUCENE-3143
+  public void testInvalidFilesToCreateCompound() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
+    IndexWriter w = new IndexWriter(dir, iwc);
+    
+    // Create an index w/ .del file
+    w.addDocument(new Document());
+    Document doc = new Document();
+    doc.add(new Field("c", "test", Store.NO, Index.ANALYZED));
+    w.addDocument(doc);
+    w.commit();
+    w.deleteDocuments(new Term("c", "test"));
+    w.close();
+    
+    // Assert that SM fails if .del exists
+    SegmentMerger sm = new SegmentMerger(dir, 1, "a", null, null, null);
+    try {
+      sm.createCompoundFile("b1", w.segmentInfos.info(0));
+      fail("should not have been able to create a .cfs with .del and .s* files");
+    } catch (AssertionError e) {
+      // expected
+    }
+    
+    // Create an index w/ .s*
+    w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE));
+    doc = new Document();
+    doc.add(new Field("c", "test", Store.NO, Index.ANALYZED));
+    w.addDocument(doc);
+    w.close();
+    IndexReader r = IndexReader.open(dir, false);
+    r.setNorm(0, "c", (byte) 1);
+    r.close();
+    
+    // Assert that SM fails if .s* exists
+    try {
+      sm.createCompoundFile("b2", w.segmentInfos.info(0));
+      fail("should not have been able to create a .cfs with .del and .s* files");
+    } catch (AssertionError e) {
+      // expected
+    }
+
+    dir.close();
+  }
+  
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestSegmentReader.java b/lucene/backwards/src/test/org/apache/lucene/index/TestSegmentReader.java
new file mode 100644
index 0000000..be5df47
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestSegmentReader.java
@@ -0,0 +1,203 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.List;
+
+import org.apache.lucene.util.LuceneTestCase;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Fieldable;
+import org.apache.lucene.search.Similarity;
+import org.apache.lucene.store.Directory;
+
+public class TestSegmentReader extends LuceneTestCase {
+  private Directory dir;
+  private Document testDoc = new Document();
+  private SegmentReader reader = null;
+  
+  //TODO: Setup the reader w/ multiple documents
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    dir = newDirectory();
+    DocHelper.setupDoc(testDoc);
+    SegmentInfo info = DocHelper.writeDoc(random, dir, testDoc);
+    reader = SegmentReader.get(true, info, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
+  }
+  
+  @Override
+  public void tearDown() throws Exception {
+    reader.close();
+    dir.close();
+    super.tearDown();
+  }
+
+  public void test() {
+    assertTrue(dir != null);
+    assertTrue(reader != null);
+    assertTrue(DocHelper.nameValues.size() > 0);
+    assertTrue(DocHelper.numFields(testDoc) == DocHelper.all.size());
+  }
+  
+  public void testDocument() throws IOException {
+    assertTrue(reader.numDocs() == 1);
+    assertTrue(reader.maxDoc() >= 1);
+    Document result = reader.document(0);
+    assertTrue(result != null);
+    //There are 2 unstored fields on the document that are not preserved across writing
+    assertTrue(DocHelper.numFields(result) == DocHelper.numFields(testDoc) - DocHelper.unstored.size());
+    
+    List<Fieldable> fields = result.getFields();
+    for (final Fieldable field : fields ) { 
+      assertTrue(field != null);
+      assertTrue(DocHelper.nameValues.containsKey(field.name()));
+    }
+  }
+  
+  public void testDelete() throws IOException {
+    Document docToDelete = new Document();
+    DocHelper.setupDoc(docToDelete);
+    SegmentInfo info = DocHelper.writeDoc(random, dir, docToDelete);
+    SegmentReader deleteReader = SegmentReader.get(false, info, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
+    assertTrue(deleteReader != null);
+    assertTrue(deleteReader.numDocs() == 1);
+    deleteReader.deleteDocument(0);
+    assertTrue(deleteReader.isDeleted(0) == true);
+    assertTrue(deleteReader.hasDeletions() == true);
+    assertTrue(deleteReader.numDocs() == 0);
+    deleteReader.close();
+  }    
+  
+  public void testGetFieldNameVariations() {
+    Collection<String> result = reader.getFieldNames(IndexReader.FieldOption.ALL);
+    assertTrue(result != null);
+    assertTrue(result.size() == DocHelper.all.size());
+    for (Iterator<String> iter = result.iterator(); iter.hasNext();) {
+      String s =  iter.next();
+      //System.out.println("Name: " + s);
+      assertTrue(DocHelper.nameValues.containsKey(s) == true || s.equals(""));
+    }                                                                               
+    result = reader.getFieldNames(IndexReader.FieldOption.INDEXED);
+    assertTrue(result != null);
+    assertTrue(result.size() == DocHelper.indexed.size());
+    for (Iterator<String> iter = result.iterator(); iter.hasNext();) {
+      String s = iter.next();
+      assertTrue(DocHelper.indexed.containsKey(s) == true || s.equals(""));
+    }
+    
+    result = reader.getFieldNames(IndexReader.FieldOption.UNINDEXED);
+    assertTrue(result != null);
+    assertTrue(result.size() == DocHelper.unindexed.size());
+    //Get all indexed fields that are storing term vectors
+    result = reader.getFieldNames(IndexReader.FieldOption.INDEXED_WITH_TERMVECTOR);
+    assertTrue(result != null);
+    assertTrue(result.size() == DocHelper.termvector.size());
+    
+    result = reader.getFieldNames(IndexReader.FieldOption.INDEXED_NO_TERMVECTOR);
+    assertTrue(result != null);
+    assertTrue(result.size() == DocHelper.notermvector.size());
+  } 
+  
+  public void testTerms() throws IOException {
+    TermEnum terms = reader.terms();
+    assertTrue(terms != null);
+    while (terms.next() == true)
+    {
+      Term term = terms.term();
+      assertTrue(term != null);
+      //System.out.println("Term: " + term);
+      String fieldValue = (String)DocHelper.nameValues.get(term.field());
+      assertTrue(fieldValue.indexOf(term.text()) != -1);
+    }
+    
+    TermDocs termDocs = reader.termDocs();
+    assertTrue(termDocs != null);
+    termDocs.seek(new Term(DocHelper.TEXT_FIELD_1_KEY, "field"));
+    assertTrue(termDocs.next() == true);
+
+    termDocs.seek(new Term(DocHelper.NO_NORMS_KEY,  DocHelper.NO_NORMS_TEXT));
+    assertTrue(termDocs.next() == true);
+
+    
+    TermPositions positions = reader.termPositions();
+    assertTrue(positions != null);
+    positions.seek(new Term(DocHelper.TEXT_FIELD_1_KEY, "field"));
+    assertTrue(positions.doc() == 0);
+    assertTrue(positions.nextPosition() >= 0);
+  }    
+  
+  public void testNorms() throws IOException {
+    //TODO: Not sure how these work/should be tested
+/*
+    try {
+      byte [] norms = reader.norms(DocHelper.TEXT_FIELD_1_KEY);
+      System.out.println("Norms: " + norms);
+      assertTrue(norms != null);
+    } catch (IOException e) {
+      e.printStackTrace();
+      assertTrue(false);
+    }
+*/
+
+    checkNorms(reader);
+  }
+
+  public static void checkNorms(IndexReader reader) throws IOException {
+    // test omit norms
+    for (int i=0; i<DocHelper.fields.length; i++) {
+      Fieldable f = DocHelper.fields[i];
+      if (f.isIndexed()) {
+        assertEquals(reader.hasNorms(f.name()), !f.getOmitNorms());
+        assertEquals(reader.hasNorms(f.name()), !DocHelper.noNorms.containsKey(f.name()));
+        if (!reader.hasNorms(f.name())) {
+          // test for fake norms of 1.0 or null depending on the flag
+          byte [] norms = reader.norms(f.name());
+          byte norm1 = Similarity.getDefault().encodeNormValue(1.0f);
+          assertNull(norms);
+          norms = new byte[reader.maxDoc()];
+          reader.norms(f.name(),norms, 0);
+          for (int j=0; j<reader.maxDoc(); j++) {
+            assertEquals(norms[j], norm1);
+          }
+        }
+      }
+    }
+  }
+  
+  public void testTermVectors() throws IOException {
+    TermFreqVector result = reader.getTermFreqVector(0, DocHelper.TEXT_FIELD_2_KEY);
+    assertTrue(result != null);
+    String [] terms = result.getTerms();
+    int [] freqs = result.getTermFrequencies();
+    assertTrue(terms != null && terms.length == 3 && freqs != null && freqs.length == 3);
+    for (int i = 0; i < terms.length; i++) {
+      String term = terms[i];
+      int freq = freqs[i];
+      assertTrue(DocHelper.FIELD_2_TEXT.indexOf(term) != -1);
+      assertTrue(freq > 0);
+    }
+
+    TermFreqVector [] results = reader.getTermFreqVectors(0);
+    assertTrue(results != null);
+    assertTrue("We do not have 3 term freq vectors, we have: " + results.length, results.length == 3);      
+  }    
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestSegmentTermDocs.java b/lucene/backwards/src/test/org/apache/lucene/index/TestSegmentTermDocs.java
new file mode 100644
index 0000000..e73ff1c
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestSegmentTermDocs.java
@@ -0,0 +1,241 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+
+import java.io.IOException;
+
+public class TestSegmentTermDocs extends LuceneTestCase {
+  private Document testDoc = new Document();
+  private Directory dir;
+  private SegmentInfo info;
+
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    dir = newDirectory();
+    DocHelper.setupDoc(testDoc);
+    info = DocHelper.writeDoc(random, dir, testDoc);
+  }
+  
+  @Override
+  public void tearDown() throws Exception {
+    dir.close();
+    super.tearDown();
+  }
+
+  public void test() {
+    assertTrue(dir != null);
+  }
+  
+  public void testTermDocs() throws IOException {
+    testTermDocs(1);
+  }
+
+  public void testTermDocs(int indexDivisor) throws IOException {
+    //After adding the document, we should be able to read it back in
+    SegmentReader reader = SegmentReader.get(true, info, indexDivisor);
+    assertTrue(reader != null);
+    assertEquals(indexDivisor, reader.getTermInfosIndexDivisor());
+    SegmentTermDocs segTermDocs = new SegmentTermDocs(reader);
+    segTermDocs.seek(new Term(DocHelper.TEXT_FIELD_2_KEY, "field"));
+    if (segTermDocs.next() == true)
+    {
+      int docId = segTermDocs.doc();
+      assertTrue(docId == 0);
+      int freq = segTermDocs.freq();
+      assertTrue(freq == 3);  
+    }
+    reader.close();
+  }  
+  
+  public void testBadSeek() throws IOException {
+    testBadSeek(1);
+  }
+
+  public void testBadSeek(int indexDivisor) throws IOException {
+    {
+      // Seeking to a term text that does not exist in this field should yield no docs
+      SegmentReader reader = SegmentReader.get(true, info, indexDivisor);
+      assertTrue(reader != null);
+      SegmentTermDocs segTermDocs = new SegmentTermDocs(reader);
+      segTermDocs.seek(new Term("textField2", "bad"));
+      assertTrue(segTermDocs.next() == false);
+      reader.close();
+    }
+    {
+      // Seeking on a field that does not exist in the index should also yield no docs
+      SegmentReader reader = SegmentReader.get(true, info, indexDivisor);
+      assertTrue(reader != null);
+      SegmentTermDocs segTermDocs = new SegmentTermDocs(reader);
+      segTermDocs.seek(new Term("junk", "bad"));
+      assertTrue(segTermDocs.next() == false);
+      reader.close();
+    }
+  }
+  
+  public void testSkipTo() throws IOException {
+    testSkipTo(1);
+  }
+
+  public void testSkipTo(int indexDivisor) throws IOException {
+    Directory dir = newDirectory();
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
+    
+    Term ta = new Term("content","aaa");
+    for(int i = 0; i < 10; i++)
+      addDoc(writer, "aaa aaa aaa aaa");
+      
+    Term tb = new Term("content","bbb");
+    for(int i = 0; i < 16; i++)
+      addDoc(writer, "bbb bbb bbb bbb");
+      
+    Term tc = new Term("content","ccc");
+    for(int i = 0; i < 50; i++)
+      addDoc(writer, "ccc ccc ccc ccc");
+      
+    // ensure we end up with a single segment
+    writer.optimize();
+    writer.close();
+    
+    IndexReader reader = IndexReader.open(dir, null, true, indexDivisor);
+
+    TermDocs tdocs = reader.termDocs();
+    
+    // without optimization (assumption skipInterval == 16)
+    
+    // with next
+    tdocs.seek(ta);
+    assertTrue(tdocs.next());
+    assertEquals(0, tdocs.doc());
+    assertEquals(4, tdocs.freq());
+    assertTrue(tdocs.next());
+    assertEquals(1, tdocs.doc());
+    assertEquals(4, tdocs.freq());
+    assertTrue(tdocs.skipTo(0));
+    assertEquals(2, tdocs.doc());
+    assertTrue(tdocs.skipTo(4));
+    assertEquals(4, tdocs.doc());
+    assertTrue(tdocs.skipTo(9));
+    assertEquals(9, tdocs.doc());
+    assertFalse(tdocs.skipTo(10));
+    
+    // without next
+    tdocs.seek(ta);
+    assertTrue(tdocs.skipTo(0));
+    assertEquals(0, tdocs.doc());
+    assertTrue(tdocs.skipTo(4));
+    assertEquals(4, tdocs.doc());
+    assertTrue(tdocs.skipTo(9));
+    assertEquals(9, tdocs.doc());
+    assertFalse(tdocs.skipTo(10));
+    
+    // exactly skipInterval documents and therefore with optimization
+    
+    // with next
+    tdocs.seek(tb);
+    assertTrue(tdocs.next());
+    assertEquals(10, tdocs.doc());
+    assertEquals(4, tdocs.freq());
+    assertTrue(tdocs.next());
+    assertEquals(11, tdocs.doc());
+    assertEquals(4, tdocs.freq());
+    assertTrue(tdocs.skipTo(5));
+    assertEquals(12, tdocs.doc());
+    assertTrue(tdocs.skipTo(15));
+    assertEquals(15, tdocs.doc());
+    assertTrue(tdocs.skipTo(24));
+    assertEquals(24, tdocs.doc());
+    assertTrue(tdocs.skipTo(25));
+    assertEquals(25, tdocs.doc());
+    assertFalse(tdocs.skipTo(26));
+    
+    // without next
+    tdocs.seek(tb);
+    assertTrue(tdocs.skipTo(5));
+    assertEquals(10, tdocs.doc());
+    assertTrue(tdocs.skipTo(15));
+    assertEquals(15, tdocs.doc());
+    assertTrue(tdocs.skipTo(24));
+    assertEquals(24, tdocs.doc());
+    assertTrue(tdocs.skipTo(25));
+    assertEquals(25, tdocs.doc());
+    assertFalse(tdocs.skipTo(26));
+    
+    // much more than skipInterval documents and therefore with optimization
+    
+    // with next
+    tdocs.seek(tc);
+    assertTrue(tdocs.next());
+    assertEquals(26, tdocs.doc());
+    assertEquals(4, tdocs.freq());
+    assertTrue(tdocs.next());
+    assertEquals(27, tdocs.doc());
+    assertEquals(4, tdocs.freq());
+    assertTrue(tdocs.skipTo(5));
+    assertEquals(28, tdocs.doc());
+    assertTrue(tdocs.skipTo(40));
+    assertEquals(40, tdocs.doc());
+    assertTrue(tdocs.skipTo(57));
+    assertEquals(57, tdocs.doc());
+    assertTrue(tdocs.skipTo(74));
+    assertEquals(74, tdocs.doc());
+    assertTrue(tdocs.skipTo(75));
+    assertEquals(75, tdocs.doc());
+    assertFalse(tdocs.skipTo(76));
+    
+    //without next
+    tdocs.seek(tc);
+    assertTrue(tdocs.skipTo(5));
+    assertEquals(26, tdocs.doc());
+    assertTrue(tdocs.skipTo(40));
+    assertEquals(40, tdocs.doc());
+    assertTrue(tdocs.skipTo(57));
+    assertEquals(57, tdocs.doc());
+    assertTrue(tdocs.skipTo(74));
+    assertEquals(74, tdocs.doc());
+    assertTrue(tdocs.skipTo(75));
+    assertEquals(75, tdocs.doc());
+    assertFalse(tdocs.skipTo(76));
+    
+    tdocs.close();
+    reader.close();
+    dir.close();
+  }
+  
+  public void testIndexDivisor() throws IOException {
+    testDoc = new Document();
+    DocHelper.setupDoc(testDoc);
+    DocHelper.writeDoc(random, dir, testDoc);
+    testTermDocs(2);
+    testBadSeek(2);
+    testSkipTo(2);
+  }
+
+  private void addDoc(IndexWriter writer, String value) throws IOException {
+    Document doc = new Document();
+    doc.add(newField("content", value, Field.Store.NO, Field.Index.ANALYZED));
+    writer.addDocument(doc);
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestSegmentTermEnum.java b/lucene/backwards/src/test/org/apache/lucene/index/TestSegmentTermEnum.java
new file mode 100644
index 0000000..f5468e8
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestSegmentTermEnum.java
@@ -0,0 +1,132 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.store.Directory;
+
+
+public class TestSegmentTermEnum extends LuceneTestCase {
+  
+  Directory dir;
+  
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    dir = newDirectory();
+  }
+  
+  @Override
+  public void tearDown() throws Exception {
+    dir.close();
+    super.tearDown();
+  }
+
+  public void testTermEnum() throws IOException {
+    IndexWriter writer = null;
+
+    writer  = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+
+    // add 100 documents with term: aaa
+    // add 100 documents with terms: aaa bbb
+    // Therefore, term 'aaa' has a document frequency of 200 and term 'bbb' of 100
+    for (int i = 0; i < 100; i++) {
+      addDoc(writer, "aaa");
+      addDoc(writer, "aaa bbb");
+    }
+
+    writer.close();
+
+    // verify document frequency of terms in an unoptimized index
+    verifyDocFreq();
+
+    // merge segments by optimizing the index
+    writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
+    writer.optimize();
+    writer.close();
+
+    // verify document frequency of terms in an optimized index
+    verifyDocFreq();
+  }
+
+  public void testPrevTermAtEnd() throws IOException
+  {
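+    // prev() returns the term before the current one, and keeps returning the last term
+    // even after next() has run off the end of the enum.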
+    IndexWriter writer  = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    addDoc(writer, "aaa bbb");
+    writer.close();
+    SegmentReader reader = SegmentReader.getOnlySegmentReader(dir);
+    SegmentTermEnum termEnum = (SegmentTermEnum) reader.terms();
+    assertTrue(termEnum.next());
+    assertEquals("aaa", termEnum.term().text());
+    assertTrue(termEnum.next());
+    assertEquals("aaa", termEnum.prev().text());
+    assertEquals("bbb", termEnum.term().text());
+    assertFalse(termEnum.next());
+    assertEquals("bbb", termEnum.prev().text());
+    reader.close();
+  }
+
+  private void verifyDocFreq()
+      throws IOException
+  {
+    IndexReader reader = IndexReader.open(dir, true);
+    TermEnum termEnum = null;
+
+    // create enumeration of all terms
+    termEnum = reader.terms();
+    // go to the first term (aaa)
+    termEnum.next();
+    // assert that term is 'aaa'
+    assertEquals("aaa", termEnum.term().text());
+    assertEquals(200, termEnum.docFreq());
+    // go to the second term (bbb)
+    termEnum.next();
+    // assert that term is 'bbb'
+    assertEquals("bbb", termEnum.term().text());
+    assertEquals(100, termEnum.docFreq());
+
+    termEnum.close();
+
+
+    // create enumeration of terms after term 'aaa', including 'aaa'
+    termEnum = reader.terms(new Term("content", "aaa"));
+    // assert that term is 'aaa'
+    assertEquals("aaa", termEnum.term().text());
+    assertEquals(200, termEnum.docFreq());
+    // go to term 'bbb'
+    termEnum.next();
+    // assert that term is 'bbb'
+    assertEquals("bbb", termEnum.term().text());
+    assertEquals(100, termEnum.docFreq());
+    termEnum.close();
+    reader.close();
+  }
+
+  private void addDoc(IndexWriter writer, String value) throws IOException
+  {
+    Document doc = new Document();
+    doc.add(newField("content", value, Field.Store.NO, Field.Index.ANALYZED));
+    writer.addDocument(doc);
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestSizeBoundedOptimize.java b/lucene/backwards/src/test/org/apache/lucene/index/TestSizeBoundedOptimize.java
new file mode 100644
index 0000000..5de97fa
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestSizeBoundedOptimize.java
@@ -0,0 +1,368 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.LuceneTestCase;
+
+public class TestSizeBoundedOptimize extends LuceneTestCase {
+
+  private void addDocs(IndexWriter writer, int numDocs) throws IOException {
+    for (int i = 0; i < numDocs; i++) {
+      Document doc = new Document();
+      writer.addDocument(doc);
+    }
+    writer.commit();
+  }
+  
+  private static IndexWriterConfig newWriterConfig() throws IOException {
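+    // Flushing by doc count is disabled and merging is turned off, so each addDocs() call
+    // below (which ends with a commit) leaves exactly one new segment behind.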
+    IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, null);
+    conf.setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH);
+    conf.setRAMBufferSizeMB(IndexWriterConfig.DEFAULT_RAM_BUFFER_SIZE_MB);
+    // prevent any merges by default.
+    conf.setMergePolicy(NoMergePolicy.COMPOUND_FILES);
+    return conf;
+  }
+  
+  public void testByteSizeLimit() throws Exception {
+    // tests that the max merge size constraint is applied during optimize.
+    Directory dir = new RAMDirectory();
+
+    // Prepare an index w/ several small segments and a large one.
+    IndexWriterConfig conf = newWriterConfig();
+    IndexWriter writer = new IndexWriter(dir, conf);
+    final int numSegments = 15;
+    for (int i = 0; i < numSegments; i++) {
+      int numDocs = i == 7 ? 30 : 1;
+      addDocs(writer, numDocs);
+    }
+    writer.close();
+    
+    SegmentInfos sis = new SegmentInfos();
+    sis.read(dir);
+    double min = sis.info(0).sizeInBytes(true);
+    
+    conf = newWriterConfig();
+    LogByteSizeMergePolicy lmp = new LogByteSizeMergePolicy();
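+    // Cap the segment size admitted to an optimize merge at just over the smallest segment's
+    // size (converted to MB), so the one large segment is excluded from merging.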
+    lmp.setMaxMergeMBForOptimize((min + 1) / (1 << 20));
+    conf.setMergePolicy(lmp);
+    
+    writer = new IndexWriter(dir, conf);
+    writer.optimize();
+    writer.close();
+
+    // Should only be 3 segments in the index, because one of them exceeds the size limit
+    sis = new SegmentInfos();
+    sis.read(dir);
+    assertEquals(3, sis.size());
+  }
+
+  public void testNumDocsLimit() throws Exception {
+    // tests that the max merge docs constraint is applied during optimize.
+    Directory dir = new RAMDirectory();
+
+    // Prepare an index w/ several small segments and a large one.
+    IndexWriterConfig conf = newWriterConfig();
+    IndexWriter writer = new IndexWriter(dir, conf);
+
+    addDocs(writer, 3);
+    addDocs(writer, 3);
+    addDocs(writer, 5);
+    addDocs(writer, 3);
+    addDocs(writer, 3);
+    addDocs(writer, 3);
+    addDocs(writer, 3);
+    
+    writer.close();
+
+    conf = newWriterConfig();
+    LogMergePolicy lmp = new LogDocMergePolicy();
+    lmp.setMaxMergeDocs(3);
+    conf.setMergePolicy(lmp);
+    
+    writer = new IndexWriter(dir, conf);
+    writer.optimize();
+    writer.close();
+
+    // Should only be 3 segments in the index, because one of them exceeds the max merge docs limit
+    SegmentInfos sis = new SegmentInfos();
+    sis.read(dir);
+    assertEquals(3, sis.size());
+  }
+
+  public void testLastSegmentTooLarge() throws Exception {
+    Directory dir = new RAMDirectory();
+
+    IndexWriterConfig conf = newWriterConfig();
+    IndexWriter writer = new IndexWriter(dir, conf);
+
+    addDocs(writer, 3);
+    addDocs(writer, 3);
+    addDocs(writer, 3);
+    addDocs(writer, 5);
+    
+    writer.close();
+
+    conf = newWriterConfig();
+    LogMergePolicy lmp = new LogDocMergePolicy();
+    lmp.setMaxMergeDocs(3);
+    conf.setMergePolicy(lmp);
+    
+    writer = new IndexWriter(dir, conf);
+    writer.optimize();
+    writer.close();
+
+    SegmentInfos sis = new SegmentInfos();
+    sis.read(dir);
+    assertEquals(2, sis.size());
+  }
+  
+  public void testFirstSegmentTooLarge() throws Exception {
+    Directory dir = new RAMDirectory();
+    
+    IndexWriterConfig conf = newWriterConfig();
+    IndexWriter writer = new IndexWriter(dir, conf);
+    
+    addDocs(writer, 5);
+    addDocs(writer, 3);
+    addDocs(writer, 3);
+    addDocs(writer, 3);
+    
+    writer.close();
+    
+    conf = newWriterConfig();
+    LogMergePolicy lmp = new LogDocMergePolicy();
+    lmp.setMaxMergeDocs(3);
+    conf.setMergePolicy(lmp);
+    
+    writer = new IndexWriter(dir, conf);
+    writer.optimize();
+    writer.close();
+    
+    SegmentInfos sis = new SegmentInfos();
+    sis.read(dir);
+    assertEquals(2, sis.size());
+  }
+  
+  public void testAllSegmentsSmall() throws Exception {
+    Directory dir = new RAMDirectory();
+    
+    IndexWriterConfig conf = newWriterConfig();
+    IndexWriter writer = new IndexWriter(dir, conf);
+    
+    addDocs(writer, 3);
+    addDocs(writer, 3);
+    addDocs(writer, 3);
+    addDocs(writer, 3);
+    
+    writer.close();
+    
+    conf = newWriterConfig();
+    LogMergePolicy lmp = new LogDocMergePolicy();
+    lmp.setMaxMergeDocs(3);
+    conf.setMergePolicy(lmp);
+    
+    writer = new IndexWriter(dir, conf);
+    writer.optimize();
+    writer.close();
+    
+    SegmentInfos sis = new SegmentInfos();
+    sis.read(dir);
+    assertEquals(1, sis.size());
+  }
+  
+  public void testAllSegmentsLarge() throws Exception {
+    Directory dir = new RAMDirectory();
+    
+    IndexWriterConfig conf = newWriterConfig();
+    IndexWriter writer = new IndexWriter(dir, conf);
+    
+    addDocs(writer, 3);
+    addDocs(writer, 3);
+    addDocs(writer, 3);
+    
+    writer.close();
+    
+    conf = newWriterConfig();
+    LogMergePolicy lmp = new LogDocMergePolicy();
+    lmp.setMaxMergeDocs(2);
+    conf.setMergePolicy(lmp);
+    
+    writer = new IndexWriter(dir, conf);
+    writer.optimize();
+    writer.close();
+    
+    SegmentInfos sis = new SegmentInfos();
+    sis.read(dir);
+    assertEquals(3, sis.size());
+  }
+  
+  public void testOneLargeOneSmall() throws Exception {
+    Directory dir = new RAMDirectory();
+    
+    IndexWriterConfig conf = newWriterConfig();
+    IndexWriter writer = new IndexWriter(dir, conf);
+    
+    addDocs(writer, 3);
+    addDocs(writer, 5);
+    addDocs(writer, 3);
+    addDocs(writer, 5);
+    
+    writer.close();
+    
+    conf = newWriterConfig();
+    LogMergePolicy lmp = new LogDocMergePolicy();
+    lmp.setMaxMergeDocs(3);
+    conf.setMergePolicy(lmp);
+    
+    writer = new IndexWriter(dir, conf);
+    writer.optimize();
+    writer.close();
+    
+    SegmentInfos sis = new SegmentInfos();
+    sis.read(dir);
+    assertEquals(4, sis.size());
+  }
+  
+  public void testMergeFactor() throws Exception {
+    Directory dir = new RAMDirectory();
+    
+    IndexWriterConfig conf = newWriterConfig();
+    IndexWriter writer = new IndexWriter(dir, conf);
+    
+    addDocs(writer, 3);
+    addDocs(writer, 3);
+    addDocs(writer, 3);
+    addDocs(writer, 3);
+    addDocs(writer, 5);
+    addDocs(writer, 3);
+    addDocs(writer, 3);
+    
+    writer.close();
+    
+    conf = newWriterConfig();
+    LogMergePolicy lmp = new LogDocMergePolicy();
+    lmp.setMaxMergeDocs(3);
+    lmp.setMergeFactor(2);
+    conf.setMergePolicy(lmp);
+    
+    writer = new IndexWriter(dir, conf);
+    writer.optimize();
+    writer.close();
+    
+    // Should only be 4 segments in the index, because of the merge factor and
+    // max merge docs settings.
+    SegmentInfos sis = new SegmentInfos();
+    sis.read(dir);
+    assertEquals(4, sis.size());
+  }
+  
+  public void testSingleNonOptimizedSegment() throws Exception {
+    Directory dir = new RAMDirectory();
+    
+    IndexWriterConfig conf = newWriterConfig();
+    IndexWriter writer = new IndexWriter(dir, conf);
+    
+    addDocs(writer, 3);
+    addDocs(writer, 5);
+    addDocs(writer, 3);
+    
+    writer.close();
+  
+    // delete the last document, so that optimize has a reason to rewrite the last segment.
+    IndexReader r = IndexReader.open(dir, false);
+    r.deleteDocument(r.numDocs() - 1);
+    r.close();
+    
+    conf = newWriterConfig();
+    LogMergePolicy lmp = new LogDocMergePolicy();
+    lmp.setMaxMergeDocs(3);
+    conf.setMergePolicy(lmp);
+    
+    writer = new IndexWriter(dir, conf);
+    writer.optimize();
+    writer.close();
+    
+    // Verify that the last segment does not have deletions.
+    SegmentInfos sis = new SegmentInfos();
+    sis.read(dir);
+    assertEquals(3, sis.size());
+    assertFalse(sis.info(2).hasDeletions());
+  }
+  
+  public void testSingleOptimizedSegment() throws Exception {
+    Directory dir = new RAMDirectory();
+    
+    IndexWriterConfig conf = newWriterConfig();
+    IndexWriter writer = new IndexWriter(dir, conf);
+    
+    addDocs(writer, 3);
+    
+    writer.close();
+    
+    conf = newWriterConfig();
+    LogMergePolicy lmp = new LogDocMergePolicy();
+    lmp.setMaxMergeDocs(3);
+    conf.setMergePolicy(lmp);
+    
+    writer = new IndexWriter(dir, conf);
+    writer.optimize();
+    writer.close();
+    
+    // Verify that the index still consists of a single segment.
+    SegmentInfos sis = new SegmentInfos();
+    sis.read(dir);
+    assertEquals(1, sis.size());
+  }
+
+  public void testSingleNonOptimizedTooLargeSegment() throws Exception {
+    Directory dir = new RAMDirectory();
+    
+    IndexWriterConfig conf = newWriterConfig();
+    IndexWriter writer = new IndexWriter(dir, conf);
+    
+    addDocs(writer, 5);
+    
+    writer.close();
+  
+    // delete the last document
+    IndexReader r = IndexReader.open(dir, false);
+    r.deleteDocument(r.numDocs() - 1);
+    r.close();
+    
+    conf = newWriterConfig();
+    LogMergePolicy lmp = new LogDocMergePolicy();
+    lmp.setMaxMergeDocs(2);
+    conf.setMergePolicy(lmp);
+    
+    writer = new IndexWriter(dir, conf);
+    writer.optimize();
+    writer.close();
+    
+    // Verify that the segment was left alone: it still has deletions because it exceeds the max merge docs limit.
+    SegmentInfos sis = new SegmentInfos();
+    sis.read(dir);
+    assertEquals(1, sis.size());
+    assertTrue(sis.info(0).hasDeletions());
+  }
+
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestSnapshotDeletionPolicy.java b/lucene/backwards/src/test/org/apache/lucene/index/TestSnapshotDeletionPolicy.java
new file mode 100644
index 0000000..37bdf23
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestSnapshotDeletionPolicy.java
@@ -0,0 +1,445 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Collection;
+import java.util.Map;
+import java.util.Random;
+import java.io.IOException;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.analysis.KeywordAnalyzer;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.index.IndexCommit;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.KeepOnlyLastCommitDeletionPolicy;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.SnapshotDeletionPolicy;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.ThreadInterruptedException;
+import org.junit.Test;
+
+//
+// This was developed for Lucene In Action,
+// http://lucenebook.com
+//
+
+public class TestSnapshotDeletionPolicy extends LuceneTestCase {
+  public static final String INDEX_PATH = "test.snapshots";
+  
+  protected IndexWriterConfig getConfig(Random random, IndexDeletionPolicy dp) {
+    IndexWriterConfig conf = newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random));
+    if (dp != null) {
+      conf.setIndexDeletionPolicy(dp);
+    }
+    return conf;
+  }
+
+  protected void checkSnapshotExists(Directory dir, IndexCommit c) throws Exception {
+    String segFileName = c.getSegmentsFileName();
+    assertTrue("segments file not found in directory: " + segFileName, dir.fileExists(segFileName));
+  }
+
+  protected void checkMaxDoc(IndexCommit commit, int expectedMaxDoc) throws Exception {
+    IndexReader reader = IndexReader.open(commit, true);
+    try {
+      assertEquals(expectedMaxDoc, reader.maxDoc());
+    } finally {
+      reader.close();
+    }
+  }
+
+  protected void prepareIndexAndSnapshots(SnapshotDeletionPolicy sdp,
+      IndexWriter writer, int numSnapshots, String snapshotPrefix)
+      throws RuntimeException, IOException {
+    for (int i = 0; i < numSnapshots; i++) {
+      // create dummy document to trigger commit.
+      writer.addDocument(new Document());
+      writer.commit();
+      sdp.snapshot(snapshotPrefix + i);
+    }
+  }
+
+  protected SnapshotDeletionPolicy getDeletionPolicy() throws IOException {
+    return getDeletionPolicy(null);
+  }
+
+  protected SnapshotDeletionPolicy getDeletionPolicy(Map<String, String> snapshots) throws IOException {
+    return new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy(), snapshots);
+  }
+
+  protected void assertSnapshotExists(Directory dir, SnapshotDeletionPolicy sdp, int numSnapshots) throws Exception {
+    for (int i = 0; i < numSnapshots; i++) {
+      IndexCommit snapshot = sdp.getSnapshot("snapshot" + i);
+      checkMaxDoc(snapshot, i + 1);
+      checkSnapshotExists(dir, snapshot);
+    }
+  }
+  
+  @Test
+  public void testSnapshotDeletionPolicy() throws Exception {
+    Directory fsDir = newDirectory();
+    runTest(random, fsDir);
+    fsDir.close();
+  }
+
+  private void runTest(Random random, Directory dir) throws Exception {
+    // Run for ~1 second
+    final long stopTime = System.currentTimeMillis() + 1000;
+
+    SnapshotDeletionPolicy dp = getDeletionPolicy();
+    final IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
+        TEST_VERSION_CURRENT, new MockAnalyzer(random)).setIndexDeletionPolicy(dp)
+        .setMaxBufferedDocs(2));
+    writer.commit();
+    
+    final Thread t = new Thread() {
+        @Override
+        public void run() {
+          Document doc = new Document();
+          doc.add(newField("content", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+          do {
+            for(int i=0;i<27;i++) {
+              try {
+                writer.addDocument(doc);
+              } catch (Throwable t) {
+                t.printStackTrace(System.out);
+                fail("addDocument failed");
+              }
+              if (i%2 == 0) {
+                try {
+                  writer.commit();
+                } catch (Exception e) {
+                  throw new RuntimeException(e);
+                }
+              }
+            }
+            try {
+              Thread.sleep(1);
+            } catch (InterruptedException ie) {
+              throw new ThreadInterruptedException(ie);
+            }
+          } while(System.currentTimeMillis() < stopTime);
+        }
+      };
+
+    t.start();
+
+    // While the above indexing thread is running, take many
+    // backups:
+    do {
+      backupIndex(dir, dp);
+      Thread.sleep(20);
+    } while(t.isAlive());
+
+    t.join();
+
+    // Add one more document to force writer to commit a
+    // final segment, so deletion policy has a chance to
+    // delete again:
+    Document doc = new Document();
+    doc.add(newField("content", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+    writer.addDocument(doc);
+
+    // Make sure we don't have any leftover files in the
+    // directory:
+    writer.close();
+    TestIndexWriter.assertNoUnreferencedFiles(dir, "some files were not deleted but should have been");
+  }
+
+  /**
+   * Example showing how to use the SnapshotDeletionPolicy to take a backup.
+   * This method does not really do a backup; instead, it reads every byte of
+   * every file just to test that the files indeed exist and are readable even
+   * while the index is changing.
+   */
+  public void backupIndex(Directory dir, SnapshotDeletionPolicy dp) throws Exception {
+    // To backup an index we first take a snapshot:
+    try {
+      copyFiles(dir,  dp.snapshot("id"));
+    } finally {
+      // Make sure to release the snapshot, otherwise these
+      // files will never be deleted during this IndexWriter
+      // session:
+      dp.release("id");
+    }
+  }
+
+  private void copyFiles(Directory dir, IndexCommit cp) throws Exception {
+
+    // While we hold the snapshot, no matter how long
+    // we take to do the backup, the IndexWriter will
+    // never delete the files in the snapshot:
+    Collection<String> files = cp.getFileNames();
+    for (final String fileName : files) { 
+      // NOTE: in a real backup you would not use
+      // readFile; you would need to use something else
+      // that copies the file to a backup location.  This
+      // could even be a spawned shell process (e.g. "tar",
+      // "zip") that takes the list of files and builds a
+      // backup. (An illustrative sketch, copyFilesToBackup, follows below.)
+      readFile(dir, fileName);
+    }
+  }
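+
+  /**
+   * Illustrative sketch only (added for clarity, not part of the original
+   * test): a real backup would copy each snapshotted file to a backup
+   * location instead of merely reading it. The method name, the backupDir
+   * parameter and the use of IndexOutput here are assumptions for the
+   * sketch; IndexOutput would need to be imported from
+   * org.apache.lucene.store.
+   */
+  private void copyFilesToBackup(Directory dir, Directory backupDir, IndexCommit cp) throws Exception {
+    byte[] copyBuffer = new byte[4096];
+    for (final String fileName : cp.getFileNames()) {
+      IndexInput in = dir.openInput(fileName);
+      try {
+        IndexOutput out = backupDir.createOutput(fileName);
+        try {
+          // Copy the snapshotted file chunk by chunk into the backup Directory.
+          long bytesLeft = in.length();
+          while (bytesLeft > 0) {
+            final int chunk = (int) Math.min(copyBuffer.length, bytesLeft);
+            in.readBytes(copyBuffer, 0, chunk);
+            out.writeBytes(copyBuffer, chunk);
+            bytesLeft -= chunk;
+          }
+        } finally {
+          out.close();
+        }
+      } finally {
+        in.close();
+      }
+    }
+  }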
+
+  byte[] buffer = new byte[4096];
+
+  private void readFile(Directory dir, String name) throws Exception {
+    IndexInput input = dir.openInput(name);
+    try {
+      long size = dir.fileLength(name);
+      long bytesLeft = size;
+      while (bytesLeft > 0) {
+        final int numToRead;
+        if (bytesLeft < buffer.length)
+          numToRead = (int) bytesLeft;
+        else
+          numToRead = buffer.length;
+        input.readBytes(buffer, 0, numToRead, false);
+        bytesLeft -= numToRead;
+      }
+      // Don't do this in your real backups!  This is just
+      // to force a backup to take a somewhat long time, to
+      // make sure we are exercising the fact that the
+      // IndexWriter should not delete this file even while
+      // we take our time reading it.
+      Thread.sleep(1);
+    } finally {
+      input.close();
+    }
+  }
+
+  @Test
+  public void testBasicSnapshots() throws Exception {
+    int numSnapshots = 3;
+    SnapshotDeletionPolicy sdp = getDeletionPolicy();
+    
+    // Create 3 snapshots: snapshot0, snapshot1, snapshot2
+    Directory dir = newDirectory();
+    IndexWriter writer = new IndexWriter(dir, getConfig(random, sdp));
+    prepareIndexAndSnapshots(sdp, writer, numSnapshots, "snapshot");
+    writer.close();
+    
+    assertSnapshotExists(dir, sdp, numSnapshots);
+
+    // open a reader on a snapshot - should succeed.
+    IndexReader.open(sdp.getSnapshot("snapshot0"), true).close();
+
+    // open a new IndexWriter w/ no snapshots to keep and assert that all snapshots are gone.
+    sdp = getDeletionPolicy();
+    writer = new IndexWriter(dir, getConfig(random, sdp));
+    writer.deleteUnusedFiles();
+    writer.close();
+    assertEquals("no snapshots should exist", 1, IndexReader.listCommits(dir).size());
+    
+    for (int i = 0; i < numSnapshots; i++) {
+      try {
+        sdp.getSnapshot("snapshot" + i);
+        fail("snapshot shouldn't have existed, but did: snapshot" + i);
+      } catch (IllegalStateException e) {
+        // expected - snapshot should not exist
+      }
+    }
+    dir.close();
+  }
+
+  @Test
+  public void testMultiThreadedSnapshotting() throws Exception {
+    Directory dir = newDirectory();
+    final SnapshotDeletionPolicy sdp = getDeletionPolicy();
+    final IndexWriter writer = new IndexWriter(dir, getConfig(random, sdp));
+
+    Thread[] threads = new Thread[10];
+    for (int i = 0; i < threads.length; i++) {
+      threads[i] = new Thread() {
+        @Override
+        public void run() {
+          try {
+            writer.addDocument(new Document());
+            writer.commit();
+            sdp.snapshot(getName());
+          } catch (Exception e) {
+            throw new RuntimeException(e);
+          }
+        }
+      };
+      threads[i].setName("t" + i);
+    }
+    
+    for (Thread t : threads) {
+      t.start();
+    }
+    
+    for (Thread t : threads) {
+      t.join();
+    }
+
+    // Do one last commit, so that after we release all snapshots, we stay w/ one commit
+    writer.addDocument(new Document());
+    writer.commit();
+    
+    for (Thread t : threads) {
+      sdp.release(t.getName());
+      writer.deleteUnusedFiles();
+    }
+    assertEquals(1, IndexReader.listCommits(dir).size());
+    writer.close();
+    dir.close();
+  }
+
+  @Test
+  public void testRollbackToOldSnapshot() throws Exception {
+    int numSnapshots = 2;
+    Directory dir = newDirectory();
+    SnapshotDeletionPolicy sdp = getDeletionPolicy();
+    IndexWriter writer = new IndexWriter(dir, getConfig(random, sdp));
+    prepareIndexAndSnapshots(sdp, writer, numSnapshots, "snapshot");
+    writer.close();
+
+    // now open the writer on "snapshot0" - make sure it succeeds
+    writer = new IndexWriter(dir, getConfig(random, sdp).setIndexCommit(sdp.getSnapshot("snapshot0")));
+    // this does the actual rollback
+    writer.commit();
+    writer.deleteUnusedFiles();
+    assertSnapshotExists(dir, sdp, numSnapshots - 1);
+    writer.close();
+    
+    // but 'snapshot1' files will still exist (need to release snapshot before they can be deleted).
+    String segFileName = sdp.getSnapshot("snapshot1").getSegmentsFileName();
+    assertTrue("snapshot files should exist in the directory: " + segFileName, dir.fileExists(segFileName));
+    dir.close();
+  }
+
+  @Test
+  public void testReleaseSnapshot() throws Exception {
+    Directory dir = newDirectory();
+    SnapshotDeletionPolicy sdp = getDeletionPolicy();
+    IndexWriter writer = new IndexWriter(dir, getConfig(random, sdp));
+    prepareIndexAndSnapshots(sdp, writer, 1, "snapshot");
+    
+    // Create another commit - we must do that, because otherwise the "snapshot"
+    // files will still remain in the index, since it's the last commit.
+    writer.addDocument(new Document());
+    writer.commit();
+    
+    // Release
+    String snapId = "snapshot0";
+    String segFileName = sdp.getSnapshot(snapId).getSegmentsFileName();
+    sdp.release(snapId);
+    try {
+      sdp.getSnapshot(snapId);
+      fail("should not have succeeded to get an unsnapshotted id");
+    } catch (IllegalStateException e) {
+      // expected
+    }
+    assertNull(sdp.getSnapshots().get(snapId));
+    writer.deleteUnusedFiles();
+    writer.close();
+    assertFalse("segments file should not be found in dirctory: " + segFileName, dir.fileExists(segFileName));
+    dir.close();
+  }
+
+  @Test
+  public void testExistingSnapshots() throws Exception {
+    // Tests the ability to construct a SDP from existing snapshots, and
+    // asserts that those snapshots/commit points are protected.
+    int numSnapshots = 3;
+    Directory dir = newDirectory();
+    SnapshotDeletionPolicy sdp = getDeletionPolicy();
+    IndexWriter writer = new IndexWriter(dir, getConfig(random, sdp));
+    prepareIndexAndSnapshots(sdp, writer, numSnapshots, "snapshot");
+    writer.close();
+
+    // Make a new policy and initialize with snapshots.
+    sdp = getDeletionPolicy(sdp.getSnapshots());
+    writer = new IndexWriter(dir, getConfig(random, sdp));
+    // attempt to delete unused files - the snapshotted files should not be deleted
+    writer.deleteUnusedFiles();
+    writer.close();
+    assertSnapshotExists(dir, sdp, numSnapshots);
+    dir.close();
+  }
+
+  @Test
+  public void testSnapshotLastCommitTwice() throws Exception {
+    Directory dir = newDirectory();
+    SnapshotDeletionPolicy sdp = getDeletionPolicy();
+    IndexWriter writer = new IndexWriter(dir, getConfig(random, sdp));
+    writer.addDocument(new Document());
+    writer.commit();
+    
+    String s1 = "s1";
+    String s2 = "s2";
+    IndexCommit ic1 = sdp.snapshot(s1);
+    IndexCommit ic2 = sdp.snapshot(s2);
+    assertTrue(ic1 == ic2); // should be the same instance
+    
+    // create another commit
+    writer.addDocument(new Document());
+    writer.commit();
+    
+    // release "s1" should not delete "s2"
+    sdp.release(s1);
+    writer.deleteUnusedFiles();
+    checkSnapshotExists(dir, ic2);
+    
+    writer.close();
+    dir.close();
+  }
+  
+  @Test
+  public void testMissingCommits() throws Exception {
+    // Tests the behavior of SDP when commits that are given at ctor are missing
+    // on onInit().
+    Directory dir = newDirectory();
+    SnapshotDeletionPolicy sdp = getDeletionPolicy();
+    IndexWriter writer = new IndexWriter(dir, getConfig(random, sdp));
+    writer.addDocument(new Document());
+    writer.commit();
+    IndexCommit ic = sdp.snapshot("s1");
+
+    // create another commit, not snapshotted.
+    writer.addDocument(new Document());
+    writer.close();
+
+    // open a new writer w/ KeepOnlyLastCommit policy, so it will delete "s1"
+    // commit.
+    new IndexWriter(dir, getConfig(random, null)).close();
+    
+    assertFalse("snapshotted commit should not exist", dir.fileExists(ic.getSegmentsFileName()));
+    
+    // Now reinit SDP from the commits in the index - the snapshot id should not
+    // exist anymore.
+    sdp = getDeletionPolicy(sdp.getSnapshots());
+    new IndexWriter(dir, getConfig(random, sdp)).close();
+    
+    try {
+      sdp.getSnapshot("s1");
+      fail("snapshot s1 should not exist");
+    } catch (IllegalStateException e) {
+      // expected.
+    }
+    dir.close();
+  }
+
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestStressAdvance.java b/lucene/backwards/src/test/org/apache/lucene/index/TestStressAdvance.java
new file mode 100644
index 0000000..763ab9b
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestStressAdvance.java
@@ -0,0 +1,137 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.lucene.util.*;
+import org.apache.lucene.store.*;
+import org.apache.lucene.document.*;
+
+public class TestStressAdvance extends LuceneTestCase {
+
+  public void testStressAdvance() throws Exception {
+    for(int iter=0;iter<3;iter++) {
+      if (VERBOSE) {
+        System.out.println("\nTEST: iter=" + iter);
+      }
+      Directory dir = newDirectory();
+      RandomIndexWriter w = new RandomIndexWriter(random, dir);
+      final Set<Integer> aDocs = new HashSet<Integer>();
+      final Document doc = new Document();
+      final Field f = newField("field", "", Field.Index.NOT_ANALYZED_NO_NORMS);
+      doc.add(f);
+      final Field idField = newField("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS);
+      doc.add(idField);
+      int num = atLeast(4097);
+      for(int id=0;id<num;id++) {
+        if (random.nextInt(4) == 3) {
+          f.setValue("a");
+          aDocs.add(id);
+        } else {
+          f.setValue("b");
+        }
+        idField.setValue(""+id);
+        w.addDocument(doc);
+      }
+
+      w.optimize();
+
+      final List<Integer> aDocIDs = new ArrayList<Integer>();
+      final List<Integer> bDocIDs = new ArrayList<Integer>();
+
+      final IndexReader r = w.getReader();
+      final int[] idToDocID = new int[r.maxDoc()];
+      for(int docID=0;docID<idToDocID.length;docID++) {
+        int id = Integer.parseInt(r.document(docID).get("id"));
+        if (aDocs.contains(id)) {
+          aDocIDs.add(docID);
+        } else {
+          bDocIDs.add(docID);
+        }
+      }
+      final TermDocs de = r.termDocs();
+      
+      for(int iter2=0;iter2<10;iter2++) {
+        if (VERBOSE) {
+          System.out.println("\nTEST: iter=" + iter + " iter2=" + iter2);
+        }
+        de.seek(new Term("field", "a"));
+        testOne(de, aDocIDs);
+
+        de.seek(new Term("field", "b"));
+        testOne(de, bDocIDs);
+      }
+
+      w.close();
+      r.close();
+      dir.close();
+    }
+  }
+
+  private void testOne(TermDocs docs, List<Integer> expected) throws Exception {
+    if (VERBOSE) {
+      System.out.println("test");
+    }
+    int upto = -1;
+    while(upto < expected.size()) {
+      if (VERBOSE) {
+        System.out.println("  cycle upto=" + upto + " of " + expected.size());
+      }
+      final int docID;
+      if (random.nextInt(4) == 1 || upto == expected.size()-1) {
+        // test nextDoc()
+        if (VERBOSE) {
+          System.out.println("    do nextDoc");
+        }
+        upto++;
+        if (docs.next()) {
+          docID = docs.doc();
+        } else {
+          docID = -1;
+        }
+      } else {
+        // test advance()
+        final int inc = _TestUtil.nextInt(random, 1, expected.size()-1-upto);
+        if (VERBOSE) {
+          System.out.println("    do advance inc=" + inc);
+        }
+        upto += inc;
+        if (docs.skipTo(expected.get(upto))) {
+          docID = docs.doc();
+        } else {
+          docID = -1;
+        }
+      }
+      if (upto == expected.size()) {
+        if (VERBOSE) {
+          System.out.println("  expect docID=" + -1 + " actual=" + docID);
+        }
+        assertEquals(-1, docID);
+      } else {
+        if (VERBOSE) {
+          System.out.println("  expect docID=" + expected.get(upto) + " actual=" + docID);
+        }
+        assertTrue(docID != -1);
+        assertEquals(expected.get(upto).intValue(), docID);
+      }
+    }
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestStressIndexing.java b/lucene/backwards/src/test/org/apache/lucene/index/TestStressIndexing.java
new file mode 100644
index 0000000..539554e
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestStressIndexing.java
@@ -0,0 +1,168 @@
+package org.apache.lucene.index;
+
+/**
+ * Copyright 2004 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.util.*;
+import org.apache.lucene.store.*;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.*;
+import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.search.*;
+
+public class TestStressIndexing extends LuceneTestCase {
+  private static abstract class TimedThread extends Thread {
+    volatile boolean failed;
+    int count;
+    private static int RUN_TIME_MSEC = atLeast(1000);
+    private TimedThread[] allThreads;
+
+    abstract public void doWork() throws Throwable;
+
+    TimedThread(TimedThread[] threads) {
+      this.allThreads = threads;
+    }
+
+    @Override
+    public void run() {
+      final long stopTime = System.currentTimeMillis() + RUN_TIME_MSEC;
+
+      count = 0;
+
+      try {
+        do {
+          if (anyErrors()) break;
+          doWork();
+          count++;
+        } while(System.currentTimeMillis() < stopTime);
+      } catch (Throwable e) {
+        System.out.println(Thread.currentThread() + ": exc");
+        e.printStackTrace(System.out);
+        failed = true;
+      }
+    }
+
+    private boolean anyErrors() {
+      for(int i=0;i<allThreads.length;i++)
+        if (allThreads[i] != null && allThreads[i].failed)
+          return true;
+      return false;
+    }
+  }
+
+  private class IndexerThread extends TimedThread {
+    IndexWriter writer;
+    int nextID;
+
+    public IndexerThread(IndexWriter writer, TimedThread[] threads) {
+      super(threads);
+      this.writer = writer;
+    }
+
+    @Override
+    public void doWork() throws Exception {
+      // Add 10 docs:
+      for(int j=0; j<10; j++) {
+        Document d = new Document();
+        int n = random.nextInt();
+        d.add(newField("id", Integer.toString(nextID++), Field.Store.YES, Field.Index.NOT_ANALYZED));
+        d.add(newField("contents", English.intToEnglish(n), Field.Store.NO, Field.Index.ANALYZED));
+        writer.addDocument(d);
+      }
+
+      // Delete 5 docs:
+      int deleteID = nextID-1;
+      for(int j=0; j<5; j++) {
+        writer.deleteDocuments(new Term("id", ""+deleteID));
+        deleteID -= 2;
+      }
+    }
+  }
+
+  private static class SearcherThread extends TimedThread {
+    private Directory directory;
+
+    public SearcherThread(Directory directory, TimedThread[] threads) {
+      super(threads);
+      this.directory = directory;
+    }
+
+    @Override
+    public void doWork() throws Throwable {
+      for (int i=0; i<100; i++)
+        (new IndexSearcher(directory, true)).close();
+      count += 100;
+    }
+  }
+
+  /*
+    Run one indexer and 2 searchers against a single index as
+    a stress test.
+  */
+  public void runStressTest(Directory directory, MergeScheduler mergeScheduler) throws Exception {
+    IndexWriter modifier = new IndexWriter(directory, newIndexWriterConfig(
+        TEST_VERSION_CURRENT, new MockAnalyzer(random))
+        .setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(10).setMergeScheduler(
+            mergeScheduler));
+    modifier.commit();
+    
+    TimedThread[] threads = new TimedThread[4];
+    int numThread = 0;
+
+
+    // One modifier that writes 10 docs then removes 5, over
+    // and over:
+    IndexerThread indexerThread = new IndexerThread(modifier, threads);
+    threads[numThread++] = indexerThread;
+    indexerThread.start();
+    
+    IndexerThread indexerThread2 = new IndexerThread(modifier, threads);
+    threads[numThread++] = indexerThread2;
+    indexerThread2.start();
+      
+    // Two searchers that constantly just re-instantiate the
+    // searcher:
+    SearcherThread searcherThread1 = new SearcherThread(directory, threads);
+    threads[numThread++] = searcherThread1;
+    searcherThread1.start();
+
+    SearcherThread searcherThread2 = new SearcherThread(directory, threads);
+    threads[numThread++] = searcherThread2;
+    searcherThread2.start();
+
+    for(int i=0;i<numThread;i++)
+      threads[i].join();
+
+    modifier.close();
+
+    for(int i=0;i<numThread;i++)
+      assertTrue(! threads[i].failed);
+
+    //System.out.println("    Writer: " + indexerThread.count + " iterations");
+    //System.out.println("Searcher 1: " + searcherThread1.count + " searchers created");
+    //System.out.println("Searcher 2: " + searcherThread2.count + " searchers created");
+  }
+
+  /*
+    Run the above stress test against a randomly chosen
+    Directory implementation.
+  */
+  public void testStressIndexAndSearching() throws Exception {
+    Directory directory = newDirectory();
+    runStressTest(directory, new ConcurrentMergeScheduler());
+    directory.close();
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestStressIndexing2.java b/lucene/backwards/src/test/org/apache/lucene/index/TestStressIndexing2.java
new file mode 100644
index 0000000..9f914d8
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestStressIndexing2.java
@@ -0,0 +1,711 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+
+import junit.framework.Assert;
+
+import org.apache.lucene.analysis.WhitespaceAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.Fieldable;
+import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.StringHelper;
+import org.apache.lucene.util._TestUtil;
+
+public class TestStressIndexing2 extends LuceneTestCase {
+  static int maxFields=4;
+  static int bigFieldSize=10;
+  static boolean sameFieldOrder=false;
+  static int mergeFactor=3;
+  static int maxBufferedDocs=3;
+  static int seed=0;
+
+  public class MockIndexWriter extends IndexWriter {
+
+    public MockIndexWriter(Directory dir, IndexWriterConfig conf) throws IOException {
+      super(dir, conf);
+    }
+
+    @Override
+    boolean testPoint(String name) {
+      //      if (name.equals("startCommit")) {
+      if (random.nextInt(4) == 2)
+        Thread.yield();
+      return true;
+    }
+  }
+  
+  public void testRandomIWReader() throws Throwable {
+    Directory dir = newDirectory();
+    
+    // TODO: verify equals using IW.getReader
+    DocsAndWriter dw = indexRandomIWReader(5, 3, 100, dir);
+    IndexReader reader = dw.writer.getReader();
+    dw.writer.commit();
+    verifyEquals(random, reader, dir, "id");
+    reader.close();
+    dw.writer.close();
+    dir.close();
+  }
+  
+  public void testRandom() throws Throwable {
+    Directory dir1 = newDirectory();
+    Directory dir2 = newDirectory();
+    // mergeFactor=2; maxBufferedDocs=2; Map docs = indexRandom(1, 3, 2, dir1);
+    int maxThreadStates = 1+random.nextInt(10);
+    boolean doReaderPooling = random.nextBoolean();
+    Map<String,Document> docs = indexRandom(5, 3, 100, dir1, maxThreadStates, doReaderPooling);
+    indexSerial(random, docs, dir2);
+
+    // sanity check of verifyEquals itself: comparing an index with itself should trivially pass
+    // verifyEquals(dir1, dir1, "id");
+    // verifyEquals(dir2, dir2, "id");
+
+    verifyEquals(dir1, dir2, "id");
+    dir1.close();
+    dir2.close();
+  }
+
+  public void testMultiConfig() throws Throwable {
+    // test lots of smaller different params together
+    int num = atLeast(3);
+    for (int i = 0; i < num; i++) { // increase iterations for better testing
+      if (VERBOSE) {
+        System.out.println("\n\nTEST: top iter=" + i);
+      }
+      sameFieldOrder=random.nextBoolean();
+      mergeFactor=random.nextInt(3)+2;
+      maxBufferedDocs=random.nextInt(3)+2;
+      int maxThreadStates = 1+random.nextInt(10);
+      boolean doReaderPooling = random.nextBoolean();
+      seed++;
+
+      int nThreads=random.nextInt(5)+1;
+      int iter=random.nextInt(5)+1;
+      int range=random.nextInt(20)+1;
+      Directory dir1 = newDirectory();
+      Directory dir2 = newDirectory();
+      if (VERBOSE) {
+        System.out.println("  nThreads=" + nThreads + " iter=" + iter + " range=" + range + " doPooling=" + doReaderPooling + " maxThreadStates=" + maxThreadStates + " sameFieldOrder=" + sameFieldOrder + " mergeFactor=" + mergeFactor);
+      }
+      Map<String,Document> docs = indexRandom(nThreads, iter, range, dir1, maxThreadStates, doReaderPooling);
+      if (VERBOSE) {
+        System.out.println("TEST: index serial");
+      }
+      indexSerial(random, docs, dir2);
+      if (VERBOSE) {
+        System.out.println("TEST: verify");
+      }
+      verifyEquals(dir1, dir2, "id");
+      dir1.close();
+      dir2.close();
+    }
+  }
+
+
+  static Term idTerm = new Term("id","");
+  IndexingThread[] threads;
+  static Comparator<Fieldable> fieldNameComparator = new Comparator<Fieldable>() {
+        public int compare(Fieldable o1, Fieldable o2) {
+          return o1.name().compareTo(o2.name());
+        }
+  };
+
+  // This test avoids using any extra synchronization in the multiple
+  // indexing threads to test that IndexWriter does correctly synchronize
+  // everything.
+  
+  public static class DocsAndWriter {
+    Map<String,Document> docs;
+    IndexWriter writer;
+  }
+  
+  public DocsAndWriter indexRandomIWReader(int nThreads, int iterations, int range, Directory dir) throws IOException, InterruptedException {
+    Map<String,Document> docs = new HashMap<String,Document>();
+    IndexWriter w = new MockIndexWriter(dir, newIndexWriterConfig(
+        TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).setOpenMode(OpenMode.CREATE).setRAMBufferSizeMB(
+                                                                                                  0.1).setMaxBufferedDocs(maxBufferedDocs).setMergePolicy(newLogMergePolicy()));
+    w.setInfoStream(VERBOSE ? System.out : null);
+    w.commit();
+    setUseCompoundFile(w.getConfig().getMergePolicy(), false);
+    setMergeFactor(w.getConfig().getMergePolicy(), mergeFactor);
+    /***
+        w.setMaxMergeDocs(Integer.MAX_VALUE);
+        w.setMaxFieldLength(10000);
+        w.setRAMBufferSizeMB(1);
+        w.setMergeFactor(10);
+    ***/
+
+    threads = new IndexingThread[nThreads];
+    for (int i=0; i<threads.length; i++) {
+      IndexingThread th = new IndexingThread();
+      th.w = w;
+      th.base = 1000000*i;
+      th.range = range;
+      th.iterations = iterations;
+      threads[i] = th;
+    }
+
+    for (int i=0; i<threads.length; i++) {
+      threads[i].start();
+    }
+    for (int i=0; i<threads.length; i++) {
+      threads[i].join();
+    }
+
+    // w.optimize();
+    //w.close();    
+
+    for (int i=0; i<threads.length; i++) {
+      IndexingThread th = threads[i];
+      synchronized(th) {
+        docs.putAll(th.docs);
+      }
+    }
+
+    _TestUtil.checkIndex(dir);
+    DocsAndWriter dw = new DocsAndWriter();
+    dw.docs = docs;
+    dw.writer = w;
+    return dw;
+  }
+  
+  public Map<String,Document> indexRandom(int nThreads, int iterations, int range, Directory dir, int maxThreadStates,
+                                          boolean doReaderPooling) throws IOException, InterruptedException {
+    Map<String,Document> docs = new HashMap<String,Document>();
+    for(int iter=0;iter<3;iter++) {
+      if (VERBOSE) {
+        System.out.println("TEST: iter=" + iter);
+      }
+      IndexWriter w = new MockIndexWriter(dir, newIndexWriterConfig(
+          TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).setOpenMode(OpenMode.CREATE)
+               .setRAMBufferSizeMB(0.1).setMaxBufferedDocs(maxBufferedDocs).setMaxThreadStates(maxThreadStates)
+               .setReaderPooling(doReaderPooling).setMergePolicy(newLogMergePolicy()));
+      w.setInfoStream(VERBOSE ? System.out : null);
+      setUseCompoundFile(w.getConfig().getMergePolicy(), false);
+      setMergeFactor(w.getConfig().getMergePolicy(), mergeFactor);
+
+      threads = new IndexingThread[nThreads];
+      for (int i=0; i<threads.length; i++) {
+        IndexingThread th = new IndexingThread();
+        th.w = w;
+        th.base = 1000000*i;
+        th.range = range;
+        th.iterations = iterations;
+        threads[i] = th;
+      }
+
+      for (int i=0; i<threads.length; i++) {
+        threads[i].start();
+      }
+      for (int i=0; i<threads.length; i++) {
+        threads[i].join();
+      }
+
+      // w.optimize();
+      w.close();    
+
+      for (int i=0; i<threads.length; i++) {
+        IndexingThread th = threads[i];
+        synchronized(th) {
+          docs.putAll(th.docs);
+        }
+      }
+    }
+
+    _TestUtil.checkIndex(dir);
+
+    return docs;
+  }
+
+  
+  public static void indexSerial(Random random, Map<String,Document> docs, Directory dir) throws IOException {
+    IndexWriter w = new IndexWriter(dir, LuceneTestCase.newIndexWriterConfig(random, TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).setMergePolicy(newLogMergePolicy()));
+
+    // index all docs in a single thread
+    Iterator<Document> iter = docs.values().iterator();
+    while (iter.hasNext()) {
+      Document d = iter.next();
+      ArrayList<Fieldable> fields = new ArrayList<Fieldable>();
+      fields.addAll(d.getFields());
+      // put fields in same order each time
+      Collections.sort(fields, fieldNameComparator);
+      
+      Document d1 = new Document();
+      d1.setBoost(d.getBoost());
+      for (int i=0; i<fields.size(); i++) {
+        d1.add(fields.get(i));
+      }
+      w.addDocument(d1);
+      // System.out.println("indexing "+d1);
+    }
+    
+    w.close();
+  }
+  
+  public static void verifyEquals(Random r, IndexReader r1, Directory dir2, String idField) throws Throwable {
+    IndexReader r2 = IndexReader.open(dir2);
+    verifyEquals(r1, r2, idField);
+    r2.close();
+  }
+
+  public static void verifyEquals(Directory dir1, Directory dir2, String idField) throws Throwable {
+    IndexReader r1 = IndexReader.open(dir1, true);
+    IndexReader r2 = IndexReader.open(dir2, true);
+    verifyEquals(r1, r2, idField);
+    r1.close();
+    r2.close();
+  }
+
+  private static void printDocs(IndexReader r) throws Throwable {
+    IndexReader[] subs = r.getSequentialSubReaders();
+    for(IndexReader sub : subs) {
+      System.out.println("  " + ((SegmentReader) sub).getSegmentInfo());
+      for(int docID=0;docID<sub.maxDoc();docID++) {
+        Document doc = sub.document(docID);
+        if (!sub.isDeleted(docID)) {
+          System.out.println("    docID=" + docID + " id:" + doc.get("id"));
+        } else {
+          System.out.println("    DEL docID=" + docID + " id:" + doc.get("id"));
+        }
+      }
+    }
+  }
+
+
+  public static void verifyEquals(IndexReader r1, IndexReader r2, String idField) throws Throwable {
+    if (VERBOSE) {
+      System.out.println("\nr1 docs:");
+      printDocs(r1);
+      System.out.println("\nr2 docs:");
+      printDocs(r2);
+    }
+    if (r1.numDocs() != r2.numDocs()) {
+      assert false: "r1.numDocs()=" + r1.numDocs() + " vs r2.numDocs()=" + r2.numDocs();
+    }
+    boolean hasDeletes = !(r1.maxDoc()==r2.maxDoc() && r1.numDocs()==r1.maxDoc());
+
+    int[] r2r1 = new int[r2.maxDoc()];   // r2 id to r1 id mapping
+
+    TermDocs termDocs1 = r1.termDocs();
+    TermDocs termDocs2 = r2.termDocs();
+
+    // create mapping from id2 space to id1 space based on idField
+    idField = StringHelper.intern(idField);
+    TermEnum termEnum = r1.terms (new Term (idField, ""));
+    do {
+      Term term = termEnum.term();
+      if (term==null || term.field() != idField) break;
+
+      termDocs1.seek (termEnum);
+      if (!termDocs1.next()) {
+        // This doc is deleted and wasn't replaced
+        termDocs2.seek(termEnum);
+        assertFalse(termDocs2.next());
+        continue;
+      }
+
+      int id1 = termDocs1.doc();
+      assertFalse(termDocs1.next());
+
+      termDocs2.seek(termEnum);
+      assertTrue(termDocs2.next());
+      int id2 = termDocs2.doc();
+      assertFalse(termDocs2.next());
+
+      r2r1[id2] = id1;
+
+      // verify stored fields are equivalent
+      try {
+        verifyEquals(r1.document(id1), r2.document(id2));
+      } catch (Throwable t) {
+        System.out.println("FAILED id=" + term + " id1=" + id1 + " id2=" + id2 + " term="+ term);
+        System.out.println("  d1=" + r1.document(id1));
+        System.out.println("  d2=" + r2.document(id2));
+        throw t;
+      }
+
+      try {
+        // verify term vectors are equivalent        
+        verifyEquals(r1.getTermFreqVectors(id1), r2.getTermFreqVectors(id2));
+      } catch (Throwable e) {
+        System.out.println("FAILED id=" + term + " id1=" + id1 + " id2=" + id2);
+        TermFreqVector[] tv1 = r1.getTermFreqVectors(id1);
+        System.out.println("  d1=" + tv1);
+        if (tv1 != null)
+          for(int i=0;i<tv1.length;i++)
+            System.out.println("    " + i + ": " + tv1[i]);
+        
+        TermFreqVector[] tv2 = r2.getTermFreqVectors(id2);
+        System.out.println("  d2=" + tv2);
+        if (tv2 != null)
+          for(int i=0;i<tv2.length;i++)
+            System.out.println("    " + i + ": " + tv2[i]);
+        
+        throw e;
+      }
+
+    } while (termEnum.next());
+
+    termEnum.close();
+
+    // Verify postings
+    TermEnum termEnum1 = r1.terms (new Term ("", ""));
+    TermEnum termEnum2 = r2.terms (new Term ("", ""));
+
+    // pack both doc and freq into single element for easy sorting
+    long[] info1 = new long[r1.numDocs()];
+    long[] info2 = new long[r2.numDocs()];
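+    // Illustrative note (added, not in the original source): doc=5, freq=2
+    // packs to (5L << 32) | 2; since the doc id occupies the high 32 bits,
+    // sorting the packed longs orders postings by doc id.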
+
+    for(;;) {
+      Term term1,term2;
+
+      // iterate until we get some docs
+      int len1;
+      for(;;) {
+        len1=0;
+        term1 = termEnum1.term();
+        if (term1==null) break;
+        termDocs1.seek(termEnum1);
+        while (termDocs1.next()) {
+          int d1 = termDocs1.doc();
+          int f1 = termDocs1.freq();
+          info1[len1] = (((long)d1)<<32) | f1;
+          len1++;
+        }
+        if (len1>0) break;
+        if (!termEnum1.next()) break;
+      }
+
+       // iterate until we get some docs
+      int len2;
+      for(;;) {
+        len2=0;
+        term2 = termEnum2.term();
+        if (term2==null) break;
+        termDocs2.seek(termEnum2);
+        while (termDocs2.next()) {
+          int d2 = termDocs2.doc();
+          int f2 = termDocs2.freq();
+          info2[len2] = (((long)r2r1[d2])<<32) | f2;
+          len2++;
+        }
+        if (len2>0) break;
+        if (!termEnum2.next()) break;
+      }
+
+      if (!hasDeletes)
+        assertEquals(termEnum1.docFreq(), termEnum2.docFreq());
+
+      assertEquals(len1, len2);
+      if (len1==0) break;  // no more terms
+
+      assertEquals(term1, term2);
+
+      // sort info2 to get it into ascending docid
+      Arrays.sort(info2, 0, len2);
+
+      // now compare
+      for (int i=0; i<len1; i++) {
+        assertEquals(info1[i], info2[i]);
+      }
+
+      termEnum1.next();
+      termEnum2.next();
+    }
+  }
+
+  public static void verifyEquals(Document d1, Document d2) {
+    List<Fieldable> ff1 = d1.getFields();
+    List<Fieldable> ff2 = d2.getFields();
+
+    Collections.sort(ff1, fieldNameComparator);
+    Collections.sort(ff2, fieldNameComparator);
+
+    assertEquals(ff1 + " : " + ff2, ff1.size(), ff2.size());
+
+    for (int i=0; i<ff1.size(); i++) {
+      Fieldable f1 = ff1.get(i);
+      Fieldable f2 = ff2.get(i);
+      if (f1.isBinary()) {
+        assert(f2.isBinary());
+      } else {
+        String s1 = f1.stringValue();
+        String s2 = f2.stringValue();
+        assertEquals(ff1 + " : " + ff2, s1,s2);
+      }
+    }
+  }
+
+  public static void verifyEquals(TermFreqVector[] d1, TermFreqVector[] d2) {
+    if (d1 == null) {
+      assertTrue(d2 == null);
+      return;
+    }
+    assertTrue(d2 != null);
+
+    assertEquals(d1.length, d2.length);
+    for(int i=0;i<d1.length;i++) {
+      TermFreqVector v1 = d1[i];
+      TermFreqVector v2 = d2[i];
+      if (v1 == null || v2 == null)
+        System.out.println("v1=" + v1 + " v2=" + v2 + " i=" + i + " of " + d1.length);
+      assertEquals(v1.size(), v2.size());
+      int numTerms = v1.size();
+      String[] terms1 = v1.getTerms();
+      String[] terms2 = v2.getTerms();
+      int[] freq1 = v1.getTermFrequencies();
+      int[] freq2 = v2.getTermFrequencies();
+      for(int j=0;j<numTerms;j++) {
+        if (!terms1[j].equals(terms2[j]))
+          assertEquals(terms1[j], terms2[j]);
+        assertEquals(freq1[j], freq2[j]);
+      }
+      if (v1 instanceof TermPositionVector) {
+        assertTrue(v2 instanceof TermPositionVector);
+        TermPositionVector tpv1 = (TermPositionVector) v1;
+        TermPositionVector tpv2 = (TermPositionVector) v2;
+        for(int j=0;j<numTerms;j++) {
+          int[] pos1 = tpv1.getTermPositions(j);
+          int[] pos2 = tpv2.getTermPositions(j);
+          if (pos1 == null) {
+            assertNull(pos2);
+          } else {
+            assertNotNull(pos1);
+            assertNotNull(pos2);
+            assertEquals(pos1.length, pos2.length);
+            TermVectorOffsetInfo[] offsets1 = tpv1.getOffsets(j);
+            TermVectorOffsetInfo[] offsets2 = tpv2.getOffsets(j);
+            if (offsets1 == null)
+              assertTrue(offsets2 == null);
+            else
+              assertTrue(offsets2 != null);
+            for(int k=0;k<pos1.length;k++) {
+              assertEquals(pos1[k], pos2[k]);
+              if (offsets1 != null) {
+                assertEquals(offsets1[k].getStartOffset(),
+                             offsets2[k].getStartOffset());
+                assertEquals(offsets1[k].getEndOffset(),
+                             offsets2[k].getEndOffset());
+              }
+            }
+          }
+        }
+      }
+    }
+  }
+
+  private class IndexingThread extends Thread {
+    IndexWriter w;
+    int base;
+    int range;
+    int iterations;
+    Map<String,Document> docs = new HashMap<String,Document>();  
+    Random r;
+
+    public int nextInt(int lim) {
+      return r.nextInt(lim);
+    }
+
+    // start is inclusive and end is exclusive
+    public int nextInt(int start, int end) {
+      return start + r.nextInt(end-start);
+    }
+
+    char[] buffer = new char[100];
+
+    private int addUTF8Token(int start) {
+      final int end = start + nextInt(20);
+      if (buffer.length < 1+end) {
+        char[] newBuffer = new char[(int) ((1+end)*1.25)];
+        System.arraycopy(buffer, 0, newBuffer, 0, buffer.length);
+        buffer = newBuffer;
+      }
+
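+      // Clarifying note (added, not in the original source): 0xd800-0xdbff are
+      // UTF-16 high surrogates and 0xdc00-0xdfff are low surrogates; case 5
+      // below deliberately emits an unpaired surrogate so that indexing of
+      // invalid UTF-16 input is exercised as well.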
+      for(int i=start;i<end;i++) {
+        int t = nextInt(6);
+        if (0 == t && i < end-1) {
+          // Make a surrogate pair
+          // High surrogate
+          buffer[i++] = (char) nextInt(0xd800, 0xdc00);
+          // Low surrogate
+          buffer[i] = (char) nextInt(0xdc00, 0xe000);
+        } else if (t <= 1)
+          buffer[i] = (char) nextInt(0x80);
+        else if (2 == t)
+          buffer[i] = (char) nextInt(0x80, 0x800);
+        else if (3 == t)
+          buffer[i] = (char) nextInt(0x800, 0xd800);
+        else if (4 == t)
+          buffer[i] = (char) nextInt(0xe000, 0xffff);
+        else if (5 == t) {
+          // Illegal unpaired surrogate
+          if (r.nextBoolean())
+            buffer[i] = (char) nextInt(0xd800, 0xdc00);
+          else
+            buffer[i] = (char) nextInt(0xdc00, 0xe000);
+        }
+      }
+      buffer[end] = ' ';
+      return 1+end;
+    }
+
+    public String getString(int nTokens) {
+      nTokens = nTokens!=0 ? nTokens : r.nextInt(4)+1;
+
+      // Half the time make a random UTF8 string
+      if (r.nextBoolean())
+        return getUTF8String(nTokens);
+
+      // avoid StringBuffer because it adds extra synchronization.
+      char[] arr = new char[nTokens*2];
+      for (int i=0; i<nTokens; i++) {
+        arr[i*2] = (char)('A' + r.nextInt(10));
+        arr[i*2+1] = ' ';
+      }
+      return new String(arr);
+    }
+    
+    public String getUTF8String(int nTokens) {
+      int upto = 0;
+      Arrays.fill(buffer, (char) 0);
+      for(int i=0;i<nTokens;i++)
+        upto = addUTF8Token(upto);
+      return new String(buffer, 0, upto);
+    }
+
+    public String getIdString() {
+      return Integer.toString(base + nextInt(range));
+    }
+
+    public void indexDoc() throws IOException {
+      Document d = new Document();
+
+      ArrayList<Field> fields = new ArrayList<Field>();      
+      String idString = getIdString();
+      Field idField =  newField(idTerm.field(), idString, Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS);
+      fields.add(idField);
+
+      int nFields = nextInt(maxFields);
+      for (int i=0; i<nFields; i++) {
+
+        Field.TermVector tvVal = Field.TermVector.NO;
+        switch (nextInt(4)) {
+        case 0:
+          tvVal = Field.TermVector.NO;
+          break;
+        case 1:
+          tvVal = Field.TermVector.YES;
+          break;
+        case 2:
+          tvVal = Field.TermVector.WITH_POSITIONS;
+          break;
+        case 3:
+          tvVal = Field.TermVector.WITH_POSITIONS_OFFSETS;
+          break;
+        }
+        
+        switch (nextInt(4)) {
+          case 0:
+            fields.add(newField("f" + nextInt(100), getString(1), Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS, tvVal));
+            break;
+          case 1:
+            fields.add(newField("f" + nextInt(100), getString(0), Field.Store.NO, Field.Index.ANALYZED, tvVal));
+            break;
+          case 2:
+            fields.add(newField("f" + nextInt(100), getString(0), Field.Store.YES, Field.Index.NO, Field.TermVector.NO));
+            break;
+          case 3:
+            fields.add(newField("f" + nextInt(100), getString(bigFieldSize), Field.Store.YES, Field.Index.ANALYZED, tvVal));
+            break;          
+        }
+      }
+
+      if (sameFieldOrder) {
+        Collections.sort(fields, fieldNameComparator);
+      } else {
+        // random placement of id field also
+        Collections.swap(fields,nextInt(fields.size()), 0);
+      }
+
+      for (int i=0; i<fields.size(); i++) {
+        d.add(fields.get(i));
+      }
+      if (VERBOSE) {
+        System.out.println(Thread.currentThread().getName() + ": indexing id:" + idString);
+      }
+      w.updateDocument(idTerm.createTerm(idString), d);
+      //System.out.println(Thread.currentThread().getName() + ": indexing "+d);
+      docs.put(idString, d);
+    }
+
+    public void deleteDoc() throws IOException {
+      String idString = getIdString();
+      if (VERBOSE) {
+        System.out.println(Thread.currentThread().getName() + ": del id:" + idString);
+      }
+      w.deleteDocuments(idTerm.createTerm(idString));
+      docs.remove(idString);
+    }
+
+    public void deleteByQuery() throws IOException {
+      String idString = getIdString();
+      if (VERBOSE) {
+        System.out.println(Thread.currentThread().getName() + ": del query id:" + idString);
+      }
+      w.deleteDocuments(new TermQuery(idTerm.createTerm(idString)));
+      docs.remove(idString);
+    }
+
+    @Override
+    public void run() {
+      try {
+        r = new Random(base+range+seed);
+        for (int i=0; i<iterations; i++) {
+          int what = nextInt(100);
+          if (what < 5) {
+            deleteDoc();
+          } else if (what < 10) {
+            deleteByQuery();
+          } else {
+            indexDoc();
+          }
+        }
+      } catch (Throwable e) {
+        e.printStackTrace();
+        Assert.fail(e.toString());
+      }
+
+      synchronized (this) {
+        docs.size();
+      }
+    }
+  }
+
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestStressNRT.java b/lucene/backwards/src/test/org/apache/lucene/index/TestStressNRT.java
new file mode 100644
index 0000000..9bdbb9e
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestStressNRT.java
@@ -0,0 +1,384 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util._TestUtil;
+
+public class TestStressNRT extends LuceneTestCase {
+  volatile IndexReader reader;
+
+  final ConcurrentHashMap<Integer,Long> model = new ConcurrentHashMap<Integer,Long>();
+  Map<Integer,Long> committedModel = new HashMap<Integer,Long>();
+  long snapshotCount;
+  long committedModelClock;
+  volatile int lastId;
+  final String field = "val_l";
+  Object[] syncArr;
+
+  private void initModel(int ndocs) {
+    snapshotCount = 0;
+    committedModelClock = 0;
+    lastId = 0;
+
+    syncArr = new Object[ndocs];
+
+    for (int i=0; i<ndocs; i++) {
+      model.put(i, -1L);
+      syncArr[i] = new Object();
+    }
+    committedModel.putAll(model);
+  }
+
+  public void test() throws Exception {
+    // update variables
+    final int commitPercent = random.nextInt(20);
+    final int softCommitPercent = random.nextInt(100); // what percent of the commits are soft
+    final int deletePercent = random.nextInt(50);
+    final int deleteByQueryPercent = random.nextInt(25);
+    final int ndocs = atLeast(50);
+    final int nWriteThreads = _TestUtil.nextInt(random, 1, TEST_NIGHTLY ? 10 : 5);
+    final int maxConcurrentCommits = _TestUtil.nextInt(random, 1, TEST_NIGHTLY ? 10 : 5);   // number of committers at a time... needed if we want to avoid commit errors due to exceeding the max
+    
+    final boolean tombstones = random.nextBoolean();
+    
+
+    // query variables
+    final AtomicLong operations = new AtomicLong(atLeast(50000));  // number of query operations to perform in total
+
+    final int nReadThreads = _TestUtil.nextInt(random, 1, TEST_NIGHTLY ? 10 : 5);
+    initModel(ndocs);
+
+    if (VERBOSE) {
+      System.out.println("\n");
+      System.out.println("TEST: commitPercent=" + commitPercent);
+      System.out.println("TEST: softCommitPercent=" + softCommitPercent);
+      System.out.println("TEST: deletePercent=" + deletePercent);
+      System.out.println("TEST: deleteByQueryPercent=" + deleteByQueryPercent);
+      System.out.println("TEST: ndocs=" + ndocs);
+      System.out.println("TEST: nWriteThreads=" + nWriteThreads);
+      System.out.println("TEST: nReadThreads=" + nReadThreads);
+      System.out.println("TEST: maxConcurrentCommits=" + maxConcurrentCommits);
+      System.out.println("TEST: tombstones=" + tombstones);
+      System.out.println("TEST: operations=" + operations);
+      System.out.println("\n");
+    }
+
+    final AtomicInteger numCommitting = new AtomicInteger();
+
+    List<Thread> threads = new ArrayList<Thread>();
+
+    Directory dir = newDirectory();
+
+    final RandomIndexWriter writer = new RandomIndexWriter(random, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    writer.setDoRandomOptimizeAssert(false);
+    writer.w.setInfoStream(VERBOSE ? System.out : null);
+    writer.commit();
+    reader = IndexReader.open(dir);
+
+    for (int i=0; i<nWriteThreads; i++) {
+      Thread thread = new Thread("WRITER"+i) {
+        Random rand = new Random(random.nextInt());
+
+        @Override
+        public void run() {
+          try {
+            while (operations.get() > 0) {
+              int oper = rand.nextInt(100);
+
+              if (oper < commitPercent) {
+                if (numCommitting.incrementAndGet() <= maxConcurrentCommits) {
+                  Map<Integer,Long> newCommittedModel;
+                  long version;
+                  IndexReader oldReader;
+
+                  synchronized(TestStressNRT.this) {
+                    newCommittedModel = new HashMap<Integer,Long>(model);  // take a snapshot
+                    version = snapshotCount++;
+                    oldReader = reader;
+                    oldReader.incRef();  // increment the reference since we will use this for reopening
+                  }
+
+                  IndexReader newReader;
+                  if (rand.nextInt(100) < softCommitPercent) {
+                    // assertU(h.commit("softCommit","true"));
+                    if (random.nextBoolean()) {
+                      if (VERBOSE) {
+                        System.out.println("TEST: " + Thread.currentThread().getName() + ": call writer.getReader");
+                      }
+                      newReader = writer.getReader(true);
+                    } else {
+                      if (VERBOSE) {
+                        System.out.println("TEST: " + Thread.currentThread().getName() + ": reopen reader=" + oldReader + " version=" + version);
+                      }
+                      newReader = oldReader.reopen(writer.w, true);
+                    }
+                  } else {
+                    // assertU(commit());
+                    if (VERBOSE) {
+                      System.out.println("TEST: " + Thread.currentThread().getName() + ": commit+reopen reader=" + oldReader + " version=" + version);
+                    }
+                    writer.commit();
+                    if (VERBOSE) {
+                      System.out.println("TEST: " + Thread.currentThread().getName() + ": now reopen after commit");
+                    }
+                    newReader = oldReader.reopen();
+                  }
+
+                  // Code below assumes newReader comes w/
+                  // extra ref:
+                  if (newReader == oldReader) {
+                    newReader.incRef();
+                  }
+
+                  oldReader.decRef();
+
+                  synchronized(TestStressNRT.this) {
+                    // install the new reader if it's newest (and check the current version since another reader may have already been installed)
+                    //System.out.println(Thread.currentThread().getName() + ": newVersion=" + newReader.getVersion());
+                    assert newReader.getRefCount() > 0;
+                    assert reader.getRefCount() > 0;
+                    if (newReader.getVersion() > reader.getVersion()) {
+                      if (VERBOSE) {
+                        System.out.println("TEST: " + Thread.currentThread().getName() + ": install new reader=" + newReader);
+                      }
+                      reader.decRef();
+                      reader = newReader;
+
+                      // Silly: forces fieldInfos to be
+                      // loaded so we don't hit IOE on later
+                      // reader.toString
+                      newReader.toString();
+
+                      // install this snapshot only if it's newer than the current one
+                      if (version >= committedModelClock) {
+                        if (VERBOSE) {
+                          System.out.println("TEST: " + Thread.currentThread().getName() + ": install new model version=" + version);
+                        }
+                        committedModel = newCommittedModel;
+                        committedModelClock = version;
+                      } else {
+                        if (VERBOSE) {
+                          System.out.println("TEST: " + Thread.currentThread().getName() + ": skip install new model version=" + version);
+                        }
+                      }
+                    } else {
+                      // if the same reader, don't decRef.
+                      if (VERBOSE) {
+                        System.out.println("TEST: " + Thread.currentThread().getName() + ": skip install new reader=" + newReader);
+                      }
+                      newReader.decRef();
+                    }
+                  }
+                }
+                numCommitting.decrementAndGet();
+              } else {
+
+                int id = rand.nextInt(ndocs);
+                Object sync = syncArr[id];
+
+                // sometimes set lastId before we actually make the change, to try to
+                // uncover more race conditions between writing and reading
+                boolean before = random.nextBoolean();
+                if (before) {
+                  lastId = id;
+                }
+
+                // We can't concurrently update the same document and retain our invariants of increasing values
+                // since we can't guarantee in what order the updates will be executed.
+                synchronized (sync) {
+                  Long val = model.get(id);
+                  long nextVal = Math.abs(val)+1;
+
+                  if (oper < commitPercent + deletePercent) {
+                    // assertU("<delete><id>" + id + "</id></delete>");
+
+                    // add tombstone first
+                    if (tombstones) {
+                      Document d = new Document();
+                      d.add(new Field("id","-"+Integer.toString(id), Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS));
+                      d.add(new Field(field, Long.toString(nextVal), Field.Store.YES, Field.Index.NO));
+                      writer.updateDocument(new Term("id", "-"+Integer.toString(id)), d);
+                    }
+
+                    if (VERBOSE) {
+                      System.out.println("TEST: " + Thread.currentThread().getName() + ": term delDocs id:" + id + " nextVal=" + nextVal);
+                    }
+                    writer.deleteDocuments(new Term("id",Integer.toString(id)));
+                    model.put(id, -nextVal);
+                  } else if (oper < commitPercent + deletePercent + deleteByQueryPercent) {
+                    //assertU("<delete><query>id:" + id + "</query></delete>");
+
+                    // add tombstone first
+                    if (tombstones) {
+                      Document d = new Document();
+                      d.add(new Field("id","-"+Integer.toString(id), Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS));
+                      d.add(new Field(field, Long.toString(nextVal), Field.Store.YES, Field.Index.NO));
+                      writer.updateDocument(new Term("id", "-"+Integer.toString(id)), d);
+                    }
+
+                    if (VERBOSE) {
+                      System.out.println("TEST: " + Thread.currentThread().getName() + ": query delDocs id:" + id + " nextVal=" + nextVal);
+                    }
+                    writer.deleteDocuments(new TermQuery(new Term("id", Integer.toString(id))));
+                    model.put(id, -nextVal);
+                  } else {
+                    // assertU(adoc("id",Integer.toString(id), field, Long.toString(nextVal)));
+                    Document d = new Document();
+                    d.add(newField("id",Integer.toString(id), Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS));
+                    d.add(newField(field, Long.toString(nextVal), Field.Store.YES, Field.Index.NO));
+                    if (VERBOSE) {
+                      System.out.println("TEST: " + Thread.currentThread().getName() + ": u id:" + id + " val=" + nextVal);
+                    }
+                    writer.updateDocument(new Term("id", Integer.toString(id)), d);
+                    if (tombstones) {
+                      // remove tombstone after new addition (this should be optional?)
+                      writer.deleteDocuments(new Term("id","-"+Integer.toString(id)));
+                    }
+                    model.put(id, nextVal);
+                  }
+                }
+
+                if (!before) {
+                  lastId = id;
+                }
+              }
+            }
+          } catch (Throwable e) {
+            System.out.println(Thread.currentThread().getName() + ": FAILED: unexpected exception");
+            e.printStackTrace(System.out);
+            throw new RuntimeException(e);
+          }
+        }
+      };
+
+      threads.add(thread);
+    }
+
+    for (int i=0; i<nReadThreads; i++) {
+      Thread thread = new Thread("READER"+i) {
+        Random rand = new Random(random.nextInt());
+
+        @Override
+        public void run() {
+          try {
+            while (operations.decrementAndGet() >= 0) {
+              // bias toward a recently changed doc
+              int id = rand.nextInt(100) < 25 ? lastId : rand.nextInt(ndocs);
+
+              // when indexing, we update the index, then the model
+              // so when querying, we should first check the model, and then the index
+
+              long val;
+              IndexReader r;
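+              // grab the committed value and the current reader atomically, and
+              // incRef the reader so it stays open for this search even if another
+              // thread swaps in a new reader and closes this one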
+              synchronized(TestStressNRT.this) {
+                val = committedModel.get(id);
+                r = reader;
+                r.incRef();
+              }
+
+              if (VERBOSE) {
+                System.out.println("TEST: " + Thread.currentThread().getName() + ": s id=" + id + " val=" + val + " r=" + r.getVersion());
+              }
+
+              //  sreq = req("wt","json", "q","id:"+Integer.toString(id), "omitHeader","true");
+              IndexSearcher searcher = new IndexSearcher(r);
+              Query q = new TermQuery(new Term("id",Integer.toString(id)));
+              TopDocs results = searcher.search(q, 10);
+
+              if (results.totalHits == 0 && tombstones) {
+                // if we couldn't find the doc, look for its tombstone
+                q = new TermQuery(new Term("id","-"+Integer.toString(id)));
+                results = searcher.search(q, 1);
+                if (results.totalHits == 0) {
+                  if (val == -1L) {
+                    // expected... no doc was added yet
+                    r.decRef();
+                    continue;
+                  }
+                  fail("No documents or tombstones found for id " + id + ", expected at least " + val + " reader=" + r);
+                }
+              }
+
+              if (results.totalHits == 0 && !tombstones) {
+                // nothing to do - we can't tell anything from a deleted doc without tombstones
+              } else {
+                // we should have found the document, or its tombstone
+                if (results.totalHits != 1) {
+                  System.out.println("FAIL: hits id:" + id + " val=" + val);
+                  for(ScoreDoc sd : results.scoreDocs) {
+                    final Document doc = r.document(sd.doc);
+                    System.out.println("  docID=" + sd.doc + " id:" + doc.get("id") + " foundVal=" + doc.get(field));
+                  }
+                  fail("id=" + id + " reader=" + r + " totalHits=" + results.totalHits);
+                }
+                Document doc = searcher.doc(results.scoreDocs[0].doc);
+                long foundVal = Long.parseLong(doc.get(field));
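+                // the model stores -val once an id has been deleted, so abs(val) is
+                // the last value written for this id; the index is always updated
+                // before the model, so the indexed value may be newer but never older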
+                if (foundVal < Math.abs(val)) {
+                  fail("foundVal=" + foundVal + " val=" + val + " id=" + id + " reader=" + r);
+                }
+              }
+
+              r.decRef();
+            }
+          } catch (Throwable e) {
+            operations.set(-1L);
+            System.out.println(Thread.currentThread().getName() + ": FAILED: unexpected exception");
+            e.printStackTrace(System.out);
+            throw new RuntimeException(e);
+          }
+        }
+      };
+
+      threads.add(thread);
+    }
+
+    for (Thread thread : threads) {
+      thread.start();
+    }
+
+    for (Thread thread : threads) {
+      thread.join();
+    }
+
+    writer.close();
+    if (VERBOSE) {
+      System.out.println("TEST: close reader=" + reader);
+    }
+    reader.close();
+    dir.close();
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestTerm.java b/lucene/backwards/src/test/org/apache/lucene/index/TestTerm.java
new file mode 100644
index 0000000..57ce23d
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestTerm.java
@@ -0,0 +1,36 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.util.LuceneTestCase;
+
+public class TestTerm extends LuceneTestCase {
+
+  public void testEquals() {
+    final Term base = new Term("same", "same");
+    final Term same = new Term("same", "same");
+    final Term differentField = new Term("different", "same");
+    final Term differentText = new Term("same", "different");
+    final String differentType = "AString";
+    assertEquals(base, base);
+    assertEquals(base, same);
+    assertFalse(base.equals(differentField));
+    assertFalse(base.equals(differentText));
+    assertFalse(base.equals(differentType));
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestTermVectorsReader.java b/lucene/backwards/src/test/org/apache/lucene/index/TestTermVectorsReader.java
new file mode 100644
index 0000000..a8abdcf
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestTermVectorsReader.java
@@ -0,0 +1,449 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.io.Reader;
+import java.util.Arrays;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.SortedSet;
+
+import org.apache.lucene.analysis.*;
+import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
+import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+
+public class TestTermVectorsReader extends LuceneTestCase {
+  //Terms must be lexicographically sorted; we sort them in setUp() rather than maintaining the order here
+  private String[] testFields = {"f1", "f2", "f3", "f4"};
+  private boolean[] testFieldsStorePos = {true, false, true, false};
+  private boolean[] testFieldsStoreOff = {true, false, false, true};
+  private String[] testTerms = {"this", "is", "a", "test"};
+  private int[][] positions = new int[testTerms.length][];
+  private TermVectorOffsetInfo[][] offsets = new TermVectorOffsetInfo[testTerms.length][];
+  private Directory dir;
+  private String seg;
+  private FieldInfos fieldInfos = new FieldInfos();
+  private static int TERM_FREQ = 3;
+
+  private class TestToken implements Comparable<TestToken> {
+    String text;
+    int pos;
+    int startOffset;
+    int endOffset;
+    public int compareTo(TestToken other) {
+      return pos - other.pos;
+    }
+  }
+
+  TestToken[] tokens = new TestToken[testTerms.length * TERM_FREQ];
+
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    /*
+    for (int i = 0; i < testFields.length; i++) {
+      fieldInfos.add(testFields[i], true, true, testFieldsStorePos[i], testFieldsStoreOff[i]);
+    }
+    */
+
+    Arrays.sort(testTerms);
+    int tokenUpto = 0;
+    for (int i = 0; i < testTerms.length; i++) {
+      positions[i] = new int[TERM_FREQ];
+      offsets[i] = new TermVectorOffsetInfo[TERM_FREQ];
+      // first position must be 0
+      for (int j = 0; j < TERM_FREQ; j++) {
+        // positions are always sorted in increasing order
+        positions[i][j] = (int) (j * 10 + Math.random() * 10);
+        // offsets are always sorted in increasing order
+        offsets[i][j] = new TermVectorOffsetInfo(j * 10, j * 10 + testTerms[i].length());
+        TestToken token = tokens[tokenUpto++] = new TestToken();
+        token.text = testTerms[i];
+        token.pos = positions[i][j];
+        token.startOffset = offsets[i][j].getStartOffset();
+        token.endOffset = offsets[i][j].getEndOffset();
+      }
+    }
+    Arrays.sort(tokens);
+
+    dir = newDirectory();
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MyAnalyzer()).setMaxBufferedDocs(-1).setMergePolicy(newLogMergePolicy(false, 10)));
+
+    Document doc = new Document();
+    for(int i=0;i<testFields.length;i++) {
+      final Field.TermVector tv;
+      if (testFieldsStorePos[i] && testFieldsStoreOff[i])
+        tv = Field.TermVector.WITH_POSITIONS_OFFSETS;
+      else if (testFieldsStorePos[i] && !testFieldsStoreOff[i])
+        tv = Field.TermVector.WITH_POSITIONS;
+      else if (!testFieldsStorePos[i] && testFieldsStoreOff[i])
+        tv = Field.TermVector.WITH_OFFSETS;
+      else
+        tv = Field.TermVector.YES;
+      doc.add(new Field(testFields[i], "", Field.Store.NO, Field.Index.ANALYZED, tv));
+    }
+
+    //Create 5 documents for testing, they all have the same
+    //terms
+    for(int j=0;j<5;j++)
+      writer.addDocument(doc);
+    writer.commit();
+    seg = writer.newestSegment().name;
+    writer.close();
+
+    fieldInfos = new FieldInfos(dir, IndexFileNames.segmentFileName(seg, IndexFileNames.FIELD_INFOS_EXTENSION));
+  }
+  
+  @Override
+  public void tearDown() throws Exception {
+    dir.close();
+    super.tearDown();
+  }
+
+  private class MyTokenStream extends TokenStream {
+    private int tokenUpto;
+    
+    private final CharTermAttribute termAtt;
+    private final PositionIncrementAttribute posIncrAtt;
+    private final OffsetAttribute offsetAtt;
+    
+    public MyTokenStream() {
+      termAtt = addAttribute(CharTermAttribute.class);
+      posIncrAtt = addAttribute(PositionIncrementAttribute.class);
+      offsetAtt = addAttribute(OffsetAttribute.class);
+    }
+    
+    @Override
+    public boolean incrementToken() {
+      if (tokenUpto >= tokens.length)
+        return false;
+      else {
+        final TestToken testToken = tokens[tokenUpto++];
+        clearAttributes();
+        termAtt.append(testToken.text);
+        offsetAtt.setOffset(testToken.startOffset, testToken.endOffset);
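+        // position increments are deltas: the gap from the previous token's
+        // absolute position, or pos+1 for the first token (positions start at -1)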
+        if (tokenUpto > 1) {
+          posIncrAtt.setPositionIncrement(testToken.pos - tokens[tokenUpto-2].pos);
+        } else {
+          posIncrAtt.setPositionIncrement(testToken.pos+1);
+        }
+        return true;
+      }
+    }
+
+    @Override
+    public void reset() throws IOException {
+      super.reset();
+      this.tokenUpto = 0;
+    }
+  }
+
+  private class MyAnalyzer extends Analyzer {
+    @Override
+    public TokenStream tokenStream(String fieldName, Reader reader) {
+      return new MyTokenStream();
+    }
+  }
+
+  public void test() throws IOException {
+    //Check to see the files were created properly in setup
+    assertTrue(dir.fileExists(IndexFileNames.segmentFileName(seg, IndexFileNames.VECTORS_DOCUMENTS_EXTENSION)));
+    assertTrue(dir.fileExists(IndexFileNames.segmentFileName(seg, IndexFileNames.VECTORS_INDEX_EXTENSION)));
+  }
+
+  public void testReader() throws IOException {
+    TermVectorsReader reader = new TermVectorsReader(dir, seg, fieldInfos);
+    for (int j = 0; j < 5; j++) {
+      TermFreqVector vector = reader.get(j, testFields[0]);
+      assertTrue(vector != null);
+      String[] terms = vector.getTerms();
+      assertTrue(terms != null);
+      assertTrue(terms.length == testTerms.length);
+      for (int i = 0; i < terms.length; i++) {
+        String term = terms[i];
+        //System.out.println("Term: " + term);
+        assertTrue(term.equals(testTerms[i]));
+      }
+    }
+    reader.close();
+  }
+
+  public void testPositionReader() throws IOException {
+    TermVectorsReader reader = new TermVectorsReader(dir, seg, fieldInfos);
+    TermPositionVector vector;
+    String[] terms;
+    vector = (TermPositionVector) reader.get(0, testFields[0]);
+    assertTrue(vector != null);
+    terms = vector.getTerms();
+    assertTrue(terms != null);
+    assertTrue(terms.length == testTerms.length);
+    for (int i = 0; i < terms.length; i++) {
+      String term = terms[i];
+      //System.out.println("Term: " + term);
+      assertTrue(term.equals(testTerms[i]));
+      int[] positions = vector.getTermPositions(i);
+      assertTrue(positions != null);
+      assertTrue(positions.length == this.positions[i].length);
+      for (int j = 0; j < positions.length; j++) {
+        int position = positions[j];
+        assertTrue(position == this.positions[i][j]);
+      }
+      TermVectorOffsetInfo[] offset = vector.getOffsets(i);
+      assertTrue(offset != null);
+      assertTrue(offset.length == this.offsets[i].length);
+      for (int j = 0; j < offset.length; j++) {
+        TermVectorOffsetInfo termVectorOffsetInfo = offset[j];
+        assertTrue(termVectorOffsetInfo.equals(offsets[i][j]));
+      }
+    }
+
+    TermFreqVector freqVector = reader.get(0, testFields[1]); //no pos, no offset
+    assertTrue(freqVector != null);
+    assertTrue(freqVector instanceof TermPositionVector == false);
+    terms = freqVector.getTerms();
+    assertTrue(terms != null);
+    assertTrue(terms.length == testTerms.length);
+    for (int i = 0; i < terms.length; i++) {
+      String term = terms[i];
+      //System.out.println("Term: " + term);
+      assertTrue(term.equals(testTerms[i]));
+    }
+    reader.close();
+  }
+
+  public void testOffsetReader() throws IOException {
+    TermVectorsReader reader = new TermVectorsReader(dir, seg, fieldInfos);
+    TermPositionVector vector = (TermPositionVector) reader.get(0, testFields[0]);
+    assertTrue(vector != null);
+    String[] terms = vector.getTerms();
+    assertTrue(terms != null);
+    assertTrue(terms.length == testTerms.length);
+    for (int i = 0; i < terms.length; i++) {
+      String term = terms[i];
+      //System.out.println("Term: " + term);
+      assertTrue(term.equals(testTerms[i]));
+      int[] positions = vector.getTermPositions(i);
+      assertTrue(positions != null);
+      assertTrue(positions.length == this.positions[i].length);
+      for (int j = 0; j < positions.length; j++) {
+        int position = positions[j];
+        assertTrue(position == this.positions[i][j]);
+      }
+      TermVectorOffsetInfo[] offset = vector.getOffsets(i);
+      assertTrue(offset != null);
+      assertTrue(offset.length == this.offsets[i].length);
+      for (int j = 0; j < offset.length; j++) {
+        TermVectorOffsetInfo termVectorOffsetInfo = offset[j];
+        assertTrue(termVectorOffsetInfo.equals(offsets[i][j]));
+      }
+    }
+    reader.close();
+  }
+
+  public void testMapper() throws IOException {
+    TermVectorsReader reader = new TermVectorsReader(dir, seg, fieldInfos);
+    SortedTermVectorMapper mapper = new SortedTermVectorMapper(new TermVectorEntryFreqSortedComparator());
+    reader.get(0, mapper);
+    SortedSet<TermVectorEntry> set = mapper.getTermVectorEntrySet();
+    assertTrue("set is null and it shouldn't be", set != null);
+    //four fields, 4 terms, all terms are the same
+    assertTrue("set Size: " + set.size() + " is not: " + 4, set.size() == 4);
+    //Check offsets and positions
+    for (Iterator<TermVectorEntry> iterator = set.iterator(); iterator.hasNext();) {
+      TermVectorEntry tve =  iterator.next();
+      assertTrue("tve is null and it shouldn't be", tve != null);
+      assertTrue("tve.getOffsets() is null and it shouldn't be", tve.getOffsets() != null);
+      assertTrue("tve.getPositions() is null and it shouldn't be", tve.getPositions() != null);
+
+    }
+
+    mapper = new SortedTermVectorMapper(new TermVectorEntryFreqSortedComparator());
+    reader.get(1, mapper);
+    set = mapper.getTermVectorEntrySet();
+    assertTrue("set is null and it shouldn't be", set != null);
+    //four fields, 4 terms, all terms are the same
+    assertTrue("set Size: " + set.size() + " is not: " + 4, set.size() == 4);
+    //Should have offsets and positions b/c we are munging all the fields together
+    for (Iterator<TermVectorEntry> iterator = set.iterator(); iterator.hasNext();) {
+      TermVectorEntry tve = iterator.next();
+      assertTrue("tve is null and it shouldn't be", tve != null);
+      assertTrue("tve.getOffsets() is null and it shouldn't be", tve.getOffsets() != null);
+      assertTrue("tve.getPositions() is null and it shouldn't be", tve.getPositions() != null);
+
+    }
+
+
+    FieldSortedTermVectorMapper fsMapper = new FieldSortedTermVectorMapper(new TermVectorEntryFreqSortedComparator());
+    reader.get(0, fsMapper);
+    Map<String,SortedSet<TermVectorEntry>> map = fsMapper.getFieldToTerms();
+    assertTrue("map Size: " + map.size() + " is not: " + testFields.length, map.size() == testFields.length);
+    for (Map.Entry<String,SortedSet<TermVectorEntry>> entry : map.entrySet()) {
+      SortedSet<TermVectorEntry> sortedSet =  entry.getValue();
+      assertTrue("sortedSet Size: " + sortedSet.size() + " is not: " + 4, sortedSet.size() == 4);
+      for (final TermVectorEntry tve : sortedSet) {
+        assertTrue("tve is null and it shouldn't be", tve != null);
+        //Check offsets and positions.
+        String field = tve.getField();
+        if (field.equals(testFields[0])) {
+          //should have offsets
+
+          assertTrue("tve.getOffsets() is null and it shouldn't be", tve.getOffsets() != null);
+          assertTrue("tve.getPositions() is null and it shouldn't be", tve.getPositions() != null);
+        }
+        else if (field.equals(testFields[1])) {
+          //should not have offsets
+
+          assertTrue("tve.getOffsets() is not null and it shouldn't be", tve.getOffsets() == null);
+          assertTrue("tve.getPositions() is not null and it shouldn't be", tve.getPositions() == null);
+        }
+      }
+    }
+    //Try mapper that ignores offs and positions
+    fsMapper = new FieldSortedTermVectorMapper(true, true, new TermVectorEntryFreqSortedComparator());
+    reader.get(0, fsMapper);
+    map = fsMapper.getFieldToTerms();
+    assertTrue("map Size: " + map.size() + " is not: " + testFields.length, map.size() == testFields.length);
+    for (final Map.Entry<String,SortedSet<TermVectorEntry>> entry : map.entrySet()) {
+      SortedSet<TermVectorEntry> sortedSet =  entry.getValue();
+      assertTrue("sortedSet Size: " + sortedSet.size() + " is not: " + 4, sortedSet.size() == 4);
+      for (final TermVectorEntry tve : sortedSet) {
+        assertTrue("tve is null and it shouldn't be", tve != null);
+        //Check offsets and positions.
+        String field = tve.getField();
+        if (field.equals(testFields[0])) {
+          //offsets and positions are ignored by this mapper, so even f1 should have none
+
+          assertTrue("tve.getOffsets() is not null and it shouldn't be", tve.getOffsets() == null);
+          assertTrue("tve.getPositions() is not null and it shouldn't be", tve.getPositions() == null);
+        }
+        else if (field.equals(testFields[1])) {
+          //should not have offsets
+
+          assertTrue("tve.getOffsets() is not null and it shouldn't be", tve.getOffsets() == null);
+          assertTrue("tve.getPositions() is not null and it shouldn't be", tve.getPositions() == null);
+        }
+      }
+    }
+
+    // test setDocumentNumber()
+    IndexReader ir = IndexReader.open(dir, true);
+    DocNumAwareMapper docNumAwareMapper = new DocNumAwareMapper();
+    assertEquals(-1, docNumAwareMapper.getDocumentNumber());
+
+    ir.getTermFreqVector(0, docNumAwareMapper);
+    assertEquals(0, docNumAwareMapper.getDocumentNumber());
+    docNumAwareMapper.setDocumentNumber(-1);
+
+    ir.getTermFreqVector(1, docNumAwareMapper);
+    assertEquals(1, docNumAwareMapper.getDocumentNumber());
+    docNumAwareMapper.setDocumentNumber(-1);
+
+    ir.getTermFreqVector(0, "f1", docNumAwareMapper);
+    assertEquals(0, docNumAwareMapper.getDocumentNumber());
+    docNumAwareMapper.setDocumentNumber(-1);
+
+    ir.getTermFreqVector(1, "f2", docNumAwareMapper);
+    assertEquals(1, docNumAwareMapper.getDocumentNumber());
+    docNumAwareMapper.setDocumentNumber(-1);
+
+    ir.getTermFreqVector(0, "f1", docNumAwareMapper);
+    assertEquals(0, docNumAwareMapper.getDocumentNumber());
+
+    ir.close();
+    reader.close();
+  }
+
+
+  /**
+   * Make sure exceptions and bad params are handled appropriately
+   */
+  public void testBadParams() throws IOException {
+    TermVectorsReader reader = null;
+    try {
+      reader = new TermVectorsReader(dir, seg, fieldInfos);
+      //Bad document number, good field number
+      reader.get(50, testFields[0]);
+      fail();
+    } catch (IOException e) {
+      // expected exception
+    } finally {
+      reader.close();
+    }
+    try {
+      reader = new TermVectorsReader(dir, seg, fieldInfos);
+      //Bad document number, no field
+      reader.get(50);
+      fail();
+    } catch (IOException e) {
+      // expected exception
+    } finally {
+      reader.close();
+    }
+    try {
+      reader = new TermVectorsReader(dir, seg, fieldInfos);
+      //good document number, bad field number
+      TermFreqVector vector = reader.get(0, "f50");
+      assertTrue(vector == null);
+      reader.close();
+    } catch (IOException e) {
+      fail();
+    } finally {
+      reader.close();
+    }
+  }
+
+
+  public static class DocNumAwareMapper extends TermVectorMapper {
+
+    public DocNumAwareMapper() {
+    }
+
+    private int documentNumber = -1;
+
+    @Override
+    public void setExpectations(String field, int numTerms, boolean storeOffsets, boolean storePositions) {
+      if (documentNumber == -1) {
+        throw new RuntimeException("Document number should be set at this point!");
+      }
+    }
+
+    @Override
+    public void map(String term, int frequency, TermVectorOffsetInfo[] offsets, int[] positions) {
+      if (documentNumber == -1) {
+        throw new RuntimeException("Document number should be set at this point!");
+      }
+    }
+
+    public int getDocumentNumber() {
+      return documentNumber;
+    }
+
+    @Override
+    public void setDocumentNumber(int documentNumber) {
+      this.documentNumber = documentNumber;
+    }
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestTermVectorsWriter.java b/lucene/backwards/src/test/org/apache/lucene/index/TestTermVectorsWriter.java
new file mode 100644
index 0000000..b6a250c
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestTermVectorsWriter.java
@@ -0,0 +1,475 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.io.StringReader;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.CachingTokenFilter;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.TeeSinkTokenFilter;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.WhitespaceAnalyzer;
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.MockDirectoryWrapper;
+import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.LuceneTestCase;
+
+/** tests for writing term vectors */
+public class TestTermVectorsWriter extends LuceneTestCase {
+  // LUCENE-1442
+  public void testDoubleOffsetCounting() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( 
+        TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    Document doc = new Document();
+    Field f = newField("field", "abcd", Field.Store.NO, Field.Index.NOT_ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
+    doc.add(f);
+    doc.add(f);
+    Field f2 = newField("field", "", Field.Store.NO, Field.Index.NOT_ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
+    doc.add(f2);
+    doc.add(f);
+    w.addDocument(doc);
+    w.close();
+
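+    // the same field value is added several times to one document, so term vector
+    // offsets must keep accumulating across the instances instead of restarting at 0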
+    IndexReader r = IndexReader.open(dir, true);
+    TermVectorOffsetInfo[] termOffsets = ((TermPositionVector) r.getTermFreqVector(0, "field")).getOffsets(0);
+
+    // Token "" occurred once
+    assertEquals(1, termOffsets.length);
+    assertEquals(8, termOffsets[0].getStartOffset());
+    assertEquals(8, termOffsets[0].getEndOffset());
+
+    // Token "abcd" occurred three times
+    termOffsets = ((TermPositionVector) r.getTermFreqVector(0, "field")).getOffsets(1);
+    assertEquals(3, termOffsets.length);
+    assertEquals(0, termOffsets[0].getStartOffset());
+    assertEquals(4, termOffsets[0].getEndOffset());
+    assertEquals(4, termOffsets[1].getStartOffset());
+    assertEquals(8, termOffsets[1].getEndOffset());
+    assertEquals(8, termOffsets[2].getStartOffset());
+    assertEquals(12, termOffsets[2].getEndOffset());
+    r.close();
+    dir.close();
+  }
+
+  // LUCENE-1442
+  public void testDoubleOffsetCounting2() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    Document doc = new Document();
+    Field f = newField("field", "abcd", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
+    doc.add(f);
+    doc.add(f);
+    w.addDocument(doc);
+    w.close();
+
+    IndexReader r = IndexReader.open(dir, true);
+    TermVectorOffsetInfo[] termOffsets = ((TermPositionVector) r.getTermFreqVector(0, "field")).getOffsets(0);
+    assertEquals(2, termOffsets.length);
+    assertEquals(0, termOffsets[0].getStartOffset());
+    assertEquals(4, termOffsets[0].getEndOffset());
+    assertEquals(5, termOffsets[1].getStartOffset());
+    assertEquals(9, termOffsets[1].getEndOffset());
+    r.close();
+    dir.close();
+  }
+
+  // LUCENE-1448
+  public void testEndOffsetPositionCharAnalyzer() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    Document doc = new Document();
+    Field f = newField("field", "abcd   ", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
+    doc.add(f);
+    doc.add(f);
+    w.addDocument(doc);
+    w.close();
+
+    IndexReader r = IndexReader.open(dir, true);
+    TermVectorOffsetInfo[] termOffsets = ((TermPositionVector) r.getTermFreqVector(0, "field")).getOffsets(0);
+    assertEquals(2, termOffsets.length);
+    assertEquals(0, termOffsets[0].getStartOffset());
+    assertEquals(4, termOffsets[0].getEndOffset());
+    assertEquals(8, termOffsets[1].getStartOffset());
+    assertEquals(12, termOffsets[1].getEndOffset());
+    r.close();
+    dir.close();
+  }
+
+  // LUCENE-1448
+  public void testEndOffsetPositionWithCachingTokenFilter() throws Exception {
+    Directory dir = newDirectory();
+    Analyzer analyzer = new MockAnalyzer(random);
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, analyzer));
+    Document doc = new Document();
+    TokenStream stream = analyzer.tokenStream("field", new StringReader("abcd   "));
+    stream.reset(); // TODO: weird to reset before wrapping with CachingTokenFilter... correct?
+    stream = new CachingTokenFilter(stream);
+    Field f = new Field("field", stream, Field.TermVector.WITH_POSITIONS_OFFSETS);
+    doc.add(f);
+    doc.add(f);
+    w.addDocument(doc);
+    w.close();
+
+    IndexReader r = IndexReader.open(dir, true);
+    TermVectorOffsetInfo[] termOffsets = ((TermPositionVector) r.getTermFreqVector(0, "field")).getOffsets(0);
+    assertEquals(2, termOffsets.length);
+    assertEquals(0, termOffsets[0].getStartOffset());
+    assertEquals(4, termOffsets[0].getEndOffset());
+    assertEquals(8, termOffsets[1].getStartOffset());
+    assertEquals(12, termOffsets[1].getEndOffset());
+    r.close();
+    dir.close();
+  }
+  
+  // LUCENE-1448
+  public void testEndOffsetPositionWithTeeSinkTokenFilter() throws Exception {
+    MockDirectoryWrapper dir = newDirectory();
+    Analyzer analyzer = new WhitespaceAnalyzer(TEST_VERSION_CURRENT);
+    IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
+    Document doc = new Document();
+    TeeSinkTokenFilter tee = new TeeSinkTokenFilter(analyzer.tokenStream("field", new StringReader("abcd   ")));
+    TokenStream sink = tee.newSinkTokenStream();
+    Field f1 = new Field("field", tee, Field.TermVector.WITH_POSITIONS_OFFSETS);
+    Field f2 = new Field("field", sink, Field.TermVector.WITH_POSITIONS_OFFSETS);
+    doc.add(f1);
+    doc.add(f2);
+    w.addDocument(doc);
+    w.close();
+
+    IndexReader r = IndexReader.open(dir, true);
+    TermVectorOffsetInfo[] termOffsets = ((TermPositionVector) r.getTermFreqVector(0, "field")).getOffsets(0);
+    assertEquals(2, termOffsets.length);
+    assertEquals(0, termOffsets[0].getStartOffset());
+    assertEquals(4, termOffsets[0].getEndOffset());
+    assertEquals(8, termOffsets[1].getStartOffset());
+    assertEquals(12, termOffsets[1].getEndOffset());
+    r.close();
+    dir.close();
+  }
+  
+  // LUCENE-1448
+  public void testEndOffsetPositionStopFilter() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( 
+        TEST_VERSION_CURRENT, new StandardAnalyzer(TEST_VERSION_CURRENT)));
+    Document doc = new Document();
+    Field f = newField("field", "abcd the", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
+    doc.add(f);
+    doc.add(f);
+    w.addDocument(doc);
+    w.close();
+
+    IndexReader r = IndexReader.open(dir, true);
+    TermVectorOffsetInfo[] termOffsets = ((TermPositionVector) r.getTermFreqVector(0, "field")).getOffsets(0);
+    assertEquals(2, termOffsets.length);
+    assertEquals(0, termOffsets[0].getStartOffset());
+    assertEquals(4, termOffsets[0].getEndOffset());
+    assertEquals(9, termOffsets[1].getStartOffset());
+    assertEquals(13, termOffsets[1].getEndOffset());
+    r.close();
+    dir.close();
+  }
+
+  // LUCENE-1448
+  public void testEndOffsetPositionStandard() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( 
+        TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    Document doc = new Document();
+    Field f = newField("field", "abcd the  ", Field.Store.NO,
+        Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
+    Field f2 = newField("field", "crunch man", Field.Store.NO,
+        Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
+    doc.add(f);
+    doc.add(f2);
+    w.addDocument(doc);
+    w.close();
+
+    IndexReader r = IndexReader.open(dir, true);
+    TermPositionVector tpv = ((TermPositionVector) r.getTermFreqVector(0, "field"));
+    TermVectorOffsetInfo[] termOffsets = tpv.getOffsets(0);
+    assertEquals(1, termOffsets.length);
+    assertEquals(0, termOffsets[0].getStartOffset());
+    assertEquals(4, termOffsets[0].getEndOffset());
+    termOffsets = tpv.getOffsets(1);
+    assertEquals(11, termOffsets[0].getStartOffset());
+    assertEquals(17, termOffsets[0].getEndOffset());
+    termOffsets = tpv.getOffsets(2);
+    assertEquals(18, termOffsets[0].getStartOffset());
+    assertEquals(21, termOffsets[0].getEndOffset());
+    r.close();
+    dir.close();
+  }
+
+  // LUCENE-1448
+  public void testEndOffsetPositionStandardEmptyField() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( 
+        TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    Document doc = new Document();
+    Field f = newField("field", "", Field.Store.NO,
+                        Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
+    Field f2 = newField("field", "crunch man", Field.Store.NO,
+        Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
+    doc.add(f);
+    doc.add(f2);
+    w.addDocument(doc);
+    w.close();
+
+    IndexReader r = IndexReader.open(dir, true);
+    TermPositionVector tpv = ((TermPositionVector) r.getTermFreqVector(0, "field"));
+    TermVectorOffsetInfo[] termOffsets = tpv.getOffsets(0);
+    assertEquals(1, termOffsets.length);
+    assertEquals(1, termOffsets[0].getStartOffset());
+    assertEquals(7, termOffsets[0].getEndOffset());
+    termOffsets = tpv.getOffsets(1);
+    assertEquals(8, termOffsets[0].getStartOffset());
+    assertEquals(11, termOffsets[0].getEndOffset());
+    r.close();
+    dir.close();
+  }
+
+  // LUCENE-1448
+  public void testEndOffsetPositionStandardEmptyField2() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( 
+        TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    Document doc = new Document();
+
+    Field f = newField("field", "abcd", Field.Store.NO,
+                        Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
+    doc.add(f);
+    doc.add(newField("field", "", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+
+    Field f2 = newField("field", "crunch", Field.Store.NO,
+        Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
+    doc.add(f2);
+
+    w.addDocument(doc);
+    w.close();
+
+    IndexReader r = IndexReader.open(dir, true);
+    TermPositionVector tpv = ((TermPositionVector) r.getTermFreqVector(0, "field"));
+    TermVectorOffsetInfo[] termOffsets = tpv.getOffsets(0);
+    assertEquals(1, termOffsets.length);
+    assertEquals(0, termOffsets[0].getStartOffset());
+    assertEquals(4, termOffsets[0].getEndOffset());
+    termOffsets = tpv.getOffsets(1);
+    assertEquals(6, termOffsets[0].getStartOffset());
+    assertEquals(12, termOffsets[0].getEndOffset());
+    r.close();
+    dir.close();
+  }
+  
+  // LUCENE-1168
+  public void testTermVectorCorruption() throws IOException {
+
+    Directory dir = newDirectory();
+    for(int iter=0;iter<2;iter++) {
+      IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
+          TEST_VERSION_CURRENT, new MockAnalyzer(random))
+          .setMaxBufferedDocs(2).setRAMBufferSizeMB(
+              IndexWriterConfig.DISABLE_AUTO_FLUSH).setMergeScheduler(
+              new SerialMergeScheduler()).setMergePolicy(
+              new LogDocMergePolicy()));
+
+      Document document = new Document();
+
+      Field storedField = newField("stored", "stored", Field.Store.YES,
+                                    Field.Index.NO);
+      document.add(storedField);
+      writer.addDocument(document);
+      writer.addDocument(document);
+
+      document = new Document();
+      document.add(storedField);
+      Field termVectorField = newField("termVector", "termVector",
+                                        Field.Store.NO, Field.Index.NOT_ANALYZED,
+                                        Field.TermVector.WITH_POSITIONS_OFFSETS);
+
+      document.add(termVectorField);
+      writer.addDocument(document);
+      writer.optimize();
+      writer.close();
+
+      IndexReader reader = IndexReader.open(dir, true);
+      for(int i=0;i<reader.numDocs();i++) {
+        reader.document(i);
+        reader.getTermFreqVectors(i);
+      }
+      reader.close();
+
+      writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT,
+          new MockAnalyzer(random)).setMaxBufferedDocs(2)
+          .setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH)
+          .setMergeScheduler(new SerialMergeScheduler()).setMergePolicy(
+              new LogDocMergePolicy()));
+
+      Directory[] indexDirs = {new MockDirectoryWrapper(random, new RAMDirectory(dir))};
+      writer.addIndexes(indexDirs);
+      writer.optimize();
+      writer.close();
+    }
+    dir.close();
+  }
+
+  // LUCENE-1168
+  public void testTermVectorCorruption2() throws IOException {
+    Directory dir = newDirectory();
+    for(int iter=0;iter<2;iter++) {
+      IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
+          TEST_VERSION_CURRENT, new MockAnalyzer(random))
+          .setMaxBufferedDocs(2).setRAMBufferSizeMB(
+              IndexWriterConfig.DISABLE_AUTO_FLUSH).setMergeScheduler(
+              new SerialMergeScheduler()).setMergePolicy(
+              new LogDocMergePolicy()));
+
+      Document document = new Document();
+
+      Field storedField = newField("stored", "stored", Field.Store.YES,
+                                    Field.Index.NO);
+      document.add(storedField);
+      writer.addDocument(document);
+      writer.addDocument(document);
+
+      document = new Document();
+      document.add(storedField);
+      Field termVectorField = newField("termVector", "termVector",
+                                        Field.Store.NO, Field.Index.NOT_ANALYZED,
+                                        Field.TermVector.WITH_POSITIONS_OFFSETS);
+      document.add(termVectorField);
+      writer.addDocument(document);
+      writer.optimize();
+      writer.close();
+
+      IndexReader reader = IndexReader.open(dir, true);
+      assertTrue(reader.getTermFreqVectors(0)==null);
+      assertTrue(reader.getTermFreqVectors(1)==null);
+      assertTrue(reader.getTermFreqVectors(2)!=null);
+      reader.close();
+    }
+    dir.close();
+  }
+
+  // LUCENE-1168
+  public void testTermVectorCorruption3() throws IOException {
+    Directory dir = newDirectory();
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
+        TEST_VERSION_CURRENT, new MockAnalyzer(random))
+        .setMaxBufferedDocs(2).setRAMBufferSizeMB(
+            IndexWriterConfig.DISABLE_AUTO_FLUSH).setMergeScheduler(
+            new SerialMergeScheduler()).setMergePolicy(new LogDocMergePolicy()));
+
+    Document document = new Document();
+    Field storedField = newField("stored", "stored", Field.Store.YES,
+                                  Field.Index.NO);
+    document.add(storedField);
+    Field termVectorField = newField("termVector", "termVector",
+                                      Field.Store.NO, Field.Index.NOT_ANALYZED,
+                                      Field.TermVector.WITH_POSITIONS_OFFSETS);
+    document.add(termVectorField);
+    for(int i=0;i<10;i++)
+      writer.addDocument(document);
+    writer.close();
+
+    writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT,
+        new MockAnalyzer(random)).setMaxBufferedDocs(2)
+        .setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH)
+        .setMergeScheduler(new SerialMergeScheduler()).setMergePolicy(
+            new LogDocMergePolicy()));
+    for(int i=0;i<6;i++)
+      writer.addDocument(document);
+
+    writer.optimize();
+    writer.close();
+
+    IndexReader reader = IndexReader.open(dir, true);
+    for(int i=0;i<10;i++) {
+      reader.getTermFreqVectors(i);
+      reader.document(i);
+    }
+    reader.close();
+    dir.close();
+  }
+  
+  // LUCENE-1008
+  public void testNoTermVectorAfterTermVector() throws IOException {
+    Directory dir = newDirectory();
+    IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(
+        TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    Document document = new Document();
+    document.add(newField("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
+        Field.TermVector.YES));
+    iw.addDocument(document);
+    document = new Document();
+    document.add(newField("tvtest", "x y z", Field.Store.NO, Field.Index.ANALYZED,
+                           Field.TermVector.NO));
+    iw.addDocument(document);
+    // Make first segment
+    iw.commit();
+
+    document.add(newField("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
+        Field.TermVector.YES));
+    iw.addDocument(document);
+    // Make 2nd segment
+    iw.commit();
+
+    iw.optimize();
+    iw.close();
+    dir.close();
+  }
+
+  // LUCENE-1010
+  public void testNoTermVectorAfterTermVectorMerge() throws IOException {
+    Directory dir = newDirectory();
+    IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(
+        TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    Document document = new Document();
+    document.add(newField("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
+        Field.TermVector.YES));
+    iw.addDocument(document);
+    iw.commit();
+
+    document = new Document();
+    document.add(newField("tvtest", "x y z", Field.Store.NO, Field.Index.ANALYZED,
+                           Field.TermVector.NO));
+    iw.addDocument(document);
+    // Make first segment
+    iw.commit();
+
+    iw.optimize();
+
+    document.add(newField("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
+        Field.TermVector.YES));
+    iw.addDocument(document);
+    // Make 2nd segment
+    iw.commit();
+    iw.optimize();
+
+    iw.close();
+    dir.close();
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestTermdocPerf.java b/lucene/backwards/src/test/org/apache/lucene/index/TestTermdocPerf.java
new file mode 100644
index 0000000..6d82f49
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestTermdocPerf.java
@@ -0,0 +1,137 @@
+package org.apache.lucene.index;
+
+/**
+ * Copyright 2006 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+import java.io.IOException;
+import java.io.Reader;
+import java.util.Random;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.ReusableAnalyzerBase;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+
+class RepeatingTokenStream extends Tokenizer {
+  
+  private final Random random;
+  private final float percentDocs;
+  private final int maxTF;
+  private int num;
+  CharTermAttribute termAtt;
+  String value;
+
+   public RepeatingTokenStream(String val, Random random, float percentDocs, int maxTF) {
+     this.value = val;
+     this.random = random;
+     this.percentDocs = percentDocs;
+     this.maxTF = maxTF;
+     this.termAtt = addAttribute(CharTermAttribute.class);
+   }
+
+   @Override
+   public boolean incrementToken() throws IOException {
+     num--;
+     if (num >= 0) {
+       clearAttributes();
+       termAtt.append(value);
+       return true;
+     }
+     return false;
+   }
+
+  @Override
+  public void reset() throws IOException {
+    super.reset();
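+    // on each reset, decide whether this document contains the term at all
+    // (percentDocs) and, if so, how many times it repeats (1..maxTF)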
+    if (random.nextFloat() < percentDocs) {
+      num = random.nextInt(maxTF) + 1;
+    } else {
+      num = 0;
+    }
+  }
+}
+
+
+public class TestTermdocPerf extends LuceneTestCase {
+
+  void addDocs(final Random random, Directory dir, final int ndocs, String field, final String val, final int maxTF, final float percentDocs) throws IOException {
+    final RepeatingTokenStream ts = new RepeatingTokenStream(val, random, percentDocs, maxTF);
+
+    Analyzer analyzer = new Analyzer() {
+      @Override
+      public TokenStream tokenStream(String fieldName, Reader reader) {
+        return ts;
+      }
+    };
+
+    Document doc = new Document();
+    doc.add(newField(field,val, Field.Store.NO, Field.Index.NOT_ANALYZED_NO_NORMS));
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
+        TEST_VERSION_CURRENT, analyzer)
+        .setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(100));
+    ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(100);
+
+    for (int i=0; i<ndocs; i++) {
+      writer.addDocument(doc);
+    }
+
+    writer.optimize();
+    writer.close();
+  }
+
+
+  public int doTest(int iter, int ndocs, int maxTF, float percentDocs) throws IOException {
+    Directory dir = newDirectory();
+
+    long start = System.currentTimeMillis();
+    addDocs(random, dir, ndocs, "foo", "val", maxTF, percentDocs);
+    long end = System.currentTimeMillis();
+    if (VERBOSE) System.out.println("milliseconds for creation of " + ndocs + " docs = " + (end-start));
+
+    IndexReader reader = IndexReader.open(dir, true);
+    TermEnum tenum = reader.terms(new Term("foo","val"));
+    TermDocs tdocs = reader.termDocs();
+
+    start = System.currentTimeMillis();
+
+    int ret=0;
+    for (int i=0; i<iter; i++) {
+      tdocs.seek(tenum);
+      while (tdocs.next()) {
+        ret += tdocs.doc();
+      }
+    }
+
+    end = System.currentTimeMillis();
+    if (VERBOSE) System.out.println("milliseconds for " + iter + " TermDocs iteration: " + (end-start));
+
+    return ret;
+  }
+
+  public void testTermDocPerf() throws IOException {
+    // performance test for 10% of documents containing a term
+    // doTest(100000, 10000,3,.1f);
+  }
+
+
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestThreadedOptimize.java b/lucene/backwards/src/test/org/apache/lucene/index/TestThreadedOptimize.java
new file mode 100644
index 0000000..64400db
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestThreadedOptimize.java
@@ -0,0 +1,136 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.MockTokenizer;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.util.English;
+
+import org.apache.lucene.util.LuceneTestCase;
+
+import java.util.Random;
+
+public class TestThreadedOptimize extends LuceneTestCase {
+  
+  private static final Analyzer ANALYZER = new MockAnalyzer(random, MockTokenizer.SIMPLE, true);
+
+  private final static int NUM_THREADS = 3;
+  //private final static int NUM_THREADS = 5;
+
+  private final static int NUM_ITER = 1;
+
+  private final static int NUM_ITER2 = 1;
+
+  private volatile boolean failed;
+
+  private void setFailed() {
+    failed = true;
+  }
+
+  public void runTest(Random random, Directory directory) throws Exception {
+
+    IndexWriter writer = new IndexWriter(
+        directory,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, ANALYZER).
+            setOpenMode(OpenMode.CREATE).
+            setMaxBufferedDocs(2).
+            setMergePolicy(newLogMergePolicy())
+    );
+
+    for(int iter=0;iter<NUM_ITER;iter++) {
+      final int iterFinal = iter;
+
+      setMergeFactor(writer.getConfig().getMergePolicy(), 100);
+
+      for(int i=0;i<200;i++) {
+        Document d = new Document();
+        d.add(newField("id", Integer.toString(i), Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS));
+        d.add(newField("contents", English.intToEnglish(i), Field.Store.NO, Field.Index.ANALYZED_NO_NORMS));
+        writer.addDocument(d);
+      }
+
+      ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(4);
+      writer.setInfoStream(VERBOSE ? System.out : null);
+
+      Thread[] threads = new Thread[NUM_THREADS];
+      
+      for(int i=0;i<NUM_THREADS;i++) {
+        final int iFinal = i;
+        final IndexWriter writerFinal = writer;
+        threads[i] = new Thread() {
+          @Override
+          public void run() {
+            try {
+              for(int j=0;j<NUM_ITER2;j++) {
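+                // start an optimize without waiting for it to finish (doWait=false),
+                // then keep adding and deleting documents concurrently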
+                writerFinal.optimize(false);
+                for(int k=0;k<17*(1+iFinal);k++) {
+                  Document d = new Document();
+                  d.add(newField("id", iterFinal + "_" + iFinal + "_" + j + "_" + k, Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS));
+                  d.add(newField("contents", English.intToEnglish(iFinal+k), Field.Store.NO, Field.Index.ANALYZED_NO_NORMS));
+                  writerFinal.addDocument(d);
+                }
+                for(int k=0;k<9*(1+iFinal);k++)
+                  writerFinal.deleteDocuments(new Term("id", iterFinal + "_" + iFinal + "_" + j + "_" + k));
+                writerFinal.optimize();
+              }
+            } catch (Throwable t) {
+              setFailed();
+              System.out.println(Thread.currentThread().getName() + ": hit exception");
+              t.printStackTrace(System.out);
+            }
+          }
+        };
+      }
+
+      for(int i=0;i<NUM_THREADS;i++)
+        threads[i].start();
+
+      for(int i=0;i<NUM_THREADS;i++)
+        threads[i].join();
+
+      assertTrue(!failed);
+
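+      // per inner iteration, thread i adds 17*(1+i) docs and deletes 9*(1+i) of
+      // them, a net +8*(1+i); summed over all threads that is
+      // 8*NUM_ITER2*(NUM_THREADS/2.0)*(1+NUM_THREADS) docs on top of the 200 added
+      // before the threads start, accumulated over the (1+iter) outer iterations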
+      final int expectedDocCount = (int) ((1+iter)*(200+8*NUM_ITER2*(NUM_THREADS/2.0)*(1+NUM_THREADS)));
+
+      assertEquals("index=" + writer.segString() + " numDocs=" + writer.numDocs() + " maxDoc=" + writer.maxDoc() + " config=" + writer.getConfig(), expectedDocCount, writer.numDocs());
+      assertEquals("index=" + writer.segString() + " numDocs=" + writer.numDocs() + " maxDoc=" + writer.maxDoc() + " config=" + writer.getConfig(), expectedDocCount, writer.maxDoc());
+
+      writer.close();
+      writer = new IndexWriter(directory, newIndexWriterConfig(
+          TEST_VERSION_CURRENT, ANALYZER).setOpenMode(
+          OpenMode.APPEND).setMaxBufferedDocs(2));
+      
+      IndexReader reader = IndexReader.open(directory, true);
+      assertTrue("reader=" + reader, reader.isOptimized());
+      assertEquals(expectedDocCount, reader.numDocs());
+      reader.close();
+    }
+    writer.close();
+  }
+
+  public void testThreadedOptimize() throws Exception {
+    Directory directory = newDirectory();
+    runTest(random, directory);
+    directory.close();
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestTieredMergePolicy.java b/lucene/backwards/src/test/org/apache/lucene/index/TestTieredMergePolicy.java
new file mode 100644
index 0000000..a4ea7fc
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestTieredMergePolicy.java
@@ -0,0 +1,110 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util._TestUtil;
+
+public class TestTieredMergePolicy extends LuceneTestCase {
+
+  public void testExpungeDeletes() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
+    TieredMergePolicy tmp = newTieredMergePolicy();
+    conf.setMergePolicy(tmp);
+    conf.setMaxBufferedDocs(4);
+    tmp.setMaxMergeAtOnce(100);
+    tmp.setSegmentsPerTier(100);
+    tmp.setExpungeDeletesPctAllowed(30.0);
+    IndexWriter w = new IndexWriter(dir, conf);
+    w.setInfoStream(VERBOSE ? System.out : null);
+    for(int i=0;i<80;i++) {
+      Document doc = new Document();
+      doc.add(newField("content", "aaa " + (i%4), Field.Store.NO, Field.Index.ANALYZED));
+      w.addDocument(doc);
+    }
+    assertEquals(80, w.maxDoc());
+    assertEquals(80, w.numDocs());
+
+    if (VERBOSE) {
+      System.out.println("\nTEST: delete docs");
+    }
+    w.deleteDocuments(new Term("content", "0"));
+    w.expungeDeletes();
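+    // 20 of the 80 docs match content:0, i.e. 25% deletions; with
+    // expungeDeletesPctAllowed=30 no segment is over the threshold, so nothing is merged away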
+
+    assertEquals(80, w.maxDoc());
+    assertEquals(60, w.numDocs());
+
+    if (VERBOSE) {
+      System.out.println("\nTEST: expunge2");
+    }
+    tmp.setExpungeDeletesPctAllowed(10.0);
+    w.expungeDeletes();
+    assertEquals(60, w.maxDoc());
+    assertEquals(60, w.numDocs());
+    w.close();
+    dir.close();
+  }
+
+  public void testPartialOptimize() throws Exception {
+    int num = atLeast(10);
+    for(int iter=0;iter<num;iter++) {
+      if (VERBOSE) {
+        System.out.println("TEST: iter=" + iter);
+      }
+      Directory dir = newDirectory();
+      IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
+      conf.setMergeScheduler(new SerialMergeScheduler());
+      TieredMergePolicy tmp = newTieredMergePolicy();
+      conf.setMergePolicy(tmp);
+      conf.setMaxBufferedDocs(2);
+      tmp.setMaxMergeAtOnce(3);
+      tmp.setSegmentsPerTier(6);
+
+      IndexWriter w = new IndexWriter(dir, conf);
+      w.setInfoStream(VERBOSE ? System.out : null);
+      int maxCount = 0;
+      final int numDocs = _TestUtil.nextInt(random, 20, 100);
+      for(int i=0;i<numDocs;i++) {
+        Document doc = new Document();
+        doc.add(newField("content", "aaa " + (i%4), Field.Store.NO, Field.Index.ANALYZED));
+        w.addDocument(doc);
+        int count = w.getSegmentCount();
+        maxCount = Math.max(count, maxCount);
+        assertTrue("count=" + count + " maxCount=" + maxCount, count >= maxCount-3);
+      }
+
+      w.flush(true, true);
+
+      int segmentCount = w.getSegmentCount();
+      int targetCount = _TestUtil.nextInt(random, 1, segmentCount);
+      if (VERBOSE) {
+        System.out.println("TEST: optimize to " + targetCount + " segs (current count=" + segmentCount + ")");
+      }
+      w.optimize(targetCount);
+      assertEquals(targetCount, w.getSegmentCount());
+
+      w.close();
+      dir.close();
+    }
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestTransactionRollback.java b/lucene/backwards/src/test/org/apache/lucene/index/TestTransactionRollback.java
new file mode 100644
index 0000000..7d64398
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestTransactionRollback.java
@@ -0,0 +1,214 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+import java.io.IOException;
+import java.util.BitSet;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.HashMap;
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.store.Directory;
+
+/**
+ * Test class to illustrate using IndexDeletionPolicy to provide multi-level rollback capability.
+ * This test case creates an index of records 1 to 100, introducing a commit point every 10 records.
+ * 
+ * A "keep all" deletion policy is used to ensure we keep all commit points for testing purposes
+ */
+
+public class TestTransactionRollback extends LuceneTestCase {
+	
+  private static final String FIELD_RECORD_ID = "record_id";
+  private Directory dir;
+	
+  //Rolls back index to a chosen ID
+  private void rollBackLast(int id) throws Exception {
+		
+    // System.out.println("Attempting to rollback to "+id);
+    String ids="-"+id;
+    IndexCommit last=null;
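+    // Commit points were labeled via user data in setUp (e.g. "records 1-30");
+    // find the commit whose label ends with "-<id>".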
+    Collection<IndexCommit> commits = IndexReader.listCommits(dir);
+    for (Iterator<IndexCommit> iterator = commits.iterator(); iterator.hasNext();) {
+      IndexCommit commit =  iterator.next();
+      Map<String,String> ud=commit.getUserData();
+      if (ud.size() > 0)
+        if (ud.get("index").endsWith(ids))
+          last=commit;
+    }
+
+    if (last==null)
+      throw new RuntimeException("Couldn't find commit point "+id);
+		
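+    // Open a writer on that commit; RollbackDeletionPolicy then removes every
+    // newer commit point, and the new commit is labeled with the rolled-back range.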
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(
+        TEST_VERSION_CURRENT, new MockAnalyzer(random)).setIndexDeletionPolicy(
+        new RollbackDeletionPolicy(id)).setIndexCommit(last));
+    Map<String,String> data = new HashMap<String,String>();
+    data.put("index", "Rolled back to 1-"+id);
+    w.commit(data);
+    w.close();
+  }
+
+  public void testRepeatedRollBacks() throws Exception {		
+
+    int expectedLastRecordId=100;
+    while (expectedLastRecordId>10) {
+      expectedLastRecordId -=10;			
+      rollBackLast(expectedLastRecordId);
+      
+      BitSet expecteds = new BitSet(100);
+      expecteds.set(1,(expectedLastRecordId+1),true);
+      checkExpecteds(expecteds);			
+    }
+  }
+	
+  private void checkExpecteds(BitSet expecteds) throws Exception {
+    IndexReader r = IndexReader.open(dir, true);
+		
+    //Perhaps not the most efficient approach but meets our needs here.
+    for (int i = 0; i < r.maxDoc(); i++) {
+      if(!r.isDeleted(i)) {
+        String sval=r.document(i).get(FIELD_RECORD_ID);
+        if(sval!=null) {
+          int val=Integer.parseInt(sval);
+          assertTrue("Did not expect document #"+val, expecteds.get(val));
+          expecteds.set(val,false);
+        }
+      }
+    }
+    r.close();
+    assertEquals("Should have 0 docs remaining ", 0 ,expecteds.cardinality());
+  }
+
+  /*
+  private void showAvailableCommitPoints() throws Exception {
+    Collection commits = IndexReader.listCommits(dir);
+    for (Iterator iterator = commits.iterator(); iterator.hasNext();) {
+      IndexCommit comm = (IndexCommit) iterator.next();
+      System.out.print("\t Available commit point:["+comm.getUserData()+"] files=");
+      Collection files = comm.getFileNames();
+      for (Iterator iterator2 = files.iterator(); iterator2.hasNext();) {
+        String filename = (String) iterator2.next();
+        System.out.print(filename+", ");				
+      }
+      System.out.println();
+    }
+  }
+  */
+
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    dir = newDirectory();
+    // Build an index of records 1 to 100, committing after each batch of 10
+    IndexDeletionPolicy sdp=new KeepAllDeletionPolicy();
+    IndexWriter w=new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setIndexDeletionPolicy(sdp));
+    for(int currentRecordId=1;currentRecordId<=100;currentRecordId++) {
+      Document doc=new Document();
+      doc.add(newField(FIELD_RECORD_ID,""+currentRecordId,Field.Store.YES,Field.Index.ANALYZED));
+      w.addDocument(doc);
+			
+      if (currentRecordId%10 == 0) {
+        Map<String,String> data = new HashMap<String,String>();
+        data.put("index", "records 1-"+currentRecordId);
+        w.commit(data);
+      }
+    }
+
+    w.close();
+  }
+  
+  @Override
+  public void tearDown() throws Exception {
+    dir.close();
+    super.tearDown();
+  }
+
+  // Deletes every commit point made after the chosen rollback point
+  class RollbackDeletionPolicy implements IndexDeletionPolicy {
+    private int rollbackPoint;
+
+    public RollbackDeletionPolicy(int rollbackPoint) {
+      this.rollbackPoint = rollbackPoint;
+    }
+
+    public void onCommit(List<? extends IndexCommit> commits) throws IOException {
+    }
+
+    public void onInit(List<? extends IndexCommit> commits) throws IOException {
+      for (final IndexCommit commit : commits) {
+        Map<String,String> userData=commit.getUserData();
+        if (userData.size() > 0) {
+          // The label of a commit point is e.g. "records 1-30".
+          // This code reads the last record id ("30" in this example) and deletes
+          // the commit point if it is after the desired rollback point.
+          String x = userData.get("index");
+          String lastVal = x.substring(x.lastIndexOf("-")+1);
+          int last = Integer.parseInt(lastVal);
+          if (last>rollbackPoint) {
+            /*
+            System.out.print("\tRolling back commit point:" +
+                             " UserData="+commit.getUserData() +")  ("+(commits.size()-1)+" commit points left) files=");
+            Collection files = commit.getFileNames();
+            for (Iterator iterator2 = files.iterator(); iterator2.hasNext();) {
+              System.out.print(" "+iterator2.next());				
+            }
+            System.out.println();
+            */
+						
+            commit.delete();									
+          }
+        }
+      }
+    }		
+  }
+
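+  // Deletes the newest commit point on init; the test below shows that this alone
+  // does not roll the index back to an earlier commit.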
+  class DeleteLastCommitPolicy implements IndexDeletionPolicy {
+
+    public void onCommit(List<? extends IndexCommit> commits) throws IOException {}
+
+    public void onInit(List<? extends IndexCommit> commits) throws IOException {
+      commits.get(commits.size()-1).delete();
+    }
+  }
+
+  public void testRollbackDeletionPolicy() throws Exception {		
+    for(int i=0;i<2;i++) {
+      // Unless you specify a prior commit point, rollback
+      // should not work:
+      new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))
+          .setIndexDeletionPolicy(new DeleteLastCommitPolicy())).close();
+      IndexReader r = IndexReader.open(dir, true);
+      assertEquals(100, r.numDocs());
+      r.close();
+    }
+  }
+	
+  // Keeps all commit points (used to build index)
+  class KeepAllDeletionPolicy implements IndexDeletionPolicy {
+    public void onCommit(List<? extends IndexCommit> commits) throws IOException {}
+    public void onInit(List<? extends IndexCommit> commits) throws IOException {}
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestTransactions.java b/lucene/backwards/src/test/org/apache/lucene/index/TestTransactions.java
new file mode 100644
index 0000000..6e5419f
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestTransactions.java
@@ -0,0 +1,237 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.MockDirectoryWrapper;
+import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.English;
+import org.apache.lucene.util.LuceneTestCase;
+
+public class TestTransactions extends LuceneTestCase {
+  
+  private static volatile boolean doFail;
+
+  private class RandomFailure extends MockDirectoryWrapper.Failure {
+    @Override
+    public void eval(MockDirectoryWrapper dir) throws IOException {
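+      // While doFail is set, randomly throw to simulate I/O failures during the
+      // indexer's prepareCommit/commit sequence.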
+      if (TestTransactions.doFail && random.nextInt() % 10 <= 3)
+        throw new IOException("now failing randomly but on purpose");
+    }
+  }
+
+  private static abstract class TimedThread extends Thread {
+    volatile boolean failed;
+    private static float RUN_TIME_MSEC = atLeast(500);
+    private TimedThread[] allThreads;
+
+    abstract public void doWork() throws Throwable;
+
+    TimedThread(TimedThread[] threads) {
+      this.allThreads = threads;
+    }
+
+    @Override
+    public void run() {
+      final long stopTime = System.currentTimeMillis() + (long) (RUN_TIME_MSEC);
+
+      try {
+        do {
+          if (anyErrors()) break;
+          doWork();
+        } while (System.currentTimeMillis() < stopTime);
+      } catch (Throwable e) {
+        System.out.println(Thread.currentThread() + ": exc");
+        e.printStackTrace(System.out);
+        failed = true;
+      }
+    }
+
+    private boolean anyErrors() {
+      for(int i=0;i<allThreads.length;i++)
+        if (allThreads[i] != null && allThreads[i].failed)
+          return true;
+      return false;
+    }
+  }
+
+  private class IndexerThread extends TimedThread {
+    Directory dir1;
+    Directory dir2;
+    Object lock;
+    int nextID;
+
+    public IndexerThread(Object lock, Directory dir1, Directory dir2, TimedThread[] threads) {
+      super(threads);
+      this.lock = lock;
+      this.dir1 = dir1;
+      this.dir2 = dir2;
+    }
+
+    @Override
+    public void doWork() throws Throwable {
+
+      IndexWriter writer1 = new IndexWriter(
+          dir1,
+          newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+              setMaxBufferedDocs(3).
+              setMergeScheduler(new ConcurrentMergeScheduler()).
+              setMergePolicy(newLogMergePolicy(2))
+      );
+      ((ConcurrentMergeScheduler) writer1.getConfig().getMergeScheduler()).setSuppressExceptions();
+
+      // Intentionally use different params so flush/merge
+      // happen @ different times
+      IndexWriter writer2 = new IndexWriter(
+          dir2,
+          newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+              setMaxBufferedDocs(2).
+              setMergeScheduler(new ConcurrentMergeScheduler()).
+              setMergePolicy(newLogMergePolicy(3))
+      );
+      ((ConcurrentMergeScheduler) writer2.getConfig().getMergeScheduler()).setSuppressExceptions();
+
+      update(writer1);
+      update(writer2);
+
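+      // Two-phase commit across both indexes: prepareCommit each writer under the
+      // shared lock; if either fails, roll both back, otherwise commit both.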
+      TestTransactions.doFail = true;
+      try {
+        synchronized(lock) {
+          try {
+            writer1.prepareCommit();
+          } catch (Throwable t) {
+            writer1.rollback();
+            writer2.rollback();
+            return;
+          }
+          try {
+            writer2.prepareCommit();
+          } catch (Throwable t) { 	
+            writer1.rollback();
+            writer2.rollback();
+            return;
+          }
+
+          writer1.commit();
+          writer2.commit();
+        }
+      } finally {
+        TestTransactions.doFail = false;
+      }  
+
+      writer1.close();
+      writer2.close();
+    }
+
+    public void update(IndexWriter writer) throws IOException {
+      // Add 10 docs:
+      for(int j=0; j<10; j++) {
+        Document d = new Document();
+        int n = random.nextInt();
+        d.add(newField("id", Integer.toString(nextID++), Field.Store.YES, Field.Index.NOT_ANALYZED));
+        d.add(newField("contents", English.intToEnglish(n), Field.Store.NO, Field.Index.ANALYZED));
+        writer.addDocument(d);
+      }
+
+      // Delete 5 docs:
+      int deleteID = nextID-1;
+      for(int j=0; j<5; j++) {
+        writer.deleteDocuments(new Term("id", ""+deleteID));
+        deleteID -= 2;
+      }
+    }
+  }
+
+  private static class SearcherThread extends TimedThread {
+    Directory dir1;
+    Directory dir2;
+    Object lock;
+
+    public SearcherThread(Object lock, Directory dir1, Directory dir2, TimedThread[] threads) {
+      super(threads);
+      this.lock = lock;
+      this.dir1 = dir1;
+      this.dir2 = dir2;
+    }
+
+    @Override
+    public void doWork() throws Throwable {
+      IndexReader r1, r2;
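+      // Open both readers under the same lock as the committer so they see a
+      // consistent pair of commits; the doc counts must then agree.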
+      synchronized(lock) {
+        r1 = IndexReader.open(dir1, true);
+        r2 = IndexReader.open(dir2, true);
+      }
+      if (r1.numDocs() != r2.numDocs())
+        throw new RuntimeException("doc counts differ: r1=" + r1.numDocs() + " r2=" + r2.numDocs());
+      r1.close();
+      r2.close();
+    }
+  }
+
+  public void initIndex(Directory dir) throws Throwable {
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    for(int j=0; j<7; j++) {
+      Document d = new Document();
+      int n = random.nextInt();
+      d.add(newField("contents", English.intToEnglish(n), Field.Store.NO, Field.Index.ANALYZED));
+      writer.addDocument(d);
+    }
+    writer.close();
+  }
+
+  public void testTransactions() throws Throwable {
+    // we can't use a non-RAM directory on Windows, because this test needs to double-write.
+    MockDirectoryWrapper dir1 = new MockDirectoryWrapper(random, new RAMDirectory());
+    MockDirectoryWrapper dir2 = new MockDirectoryWrapper(random, new RAMDirectory());
+    dir1.setPreventDoubleWrite(false);
+    dir2.setPreventDoubleWrite(false);
+    dir1.failOn(new RandomFailure());
+    dir2.failOn(new RandomFailure());
+
+    initIndex(dir1);
+    initIndex(dir2);
+
+    TimedThread[] threads = new TimedThread[3];
+    int numThread = 0;
+
+    IndexerThread indexerThread = new IndexerThread(this, dir1, dir2, threads);
+    threads[numThread++] = indexerThread;
+    indexerThread.start();
+
+    SearcherThread searcherThread1 = new SearcherThread(this, dir1, dir2, threads);
+    threads[numThread++] = searcherThread1;
+    searcherThread1.start();
+
+    SearcherThread searcherThread2 = new SearcherThread(this, dir1, dir2, threads);
+    threads[numThread++] = searcherThread2;
+    searcherThread2.start();
+
+    for(int i=0;i<numThread;i++)
+      threads[i].join();
+
+    for(int i=0;i<numThread;i++)
+      assertTrue(!threads[i].failed);
+    dir1.close();
+    dir2.close();
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestUniqueTermCount.java b/lucene/backwards/src/test/org/apache/lucene/index/TestUniqueTermCount.java
new file mode 100644
index 0000000..375fc58
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestUniqueTermCount.java
@@ -0,0 +1,107 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.ArrayList;
+import java.util.HashSet;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.MockTokenizer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.search.DefaultSimilarity;
+import org.apache.lucene.search.Similarity;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util._TestUtil;
+
+/**
+ * Tests the uniqueTermCount statistic in FieldInvertState
+ */
+public class TestUniqueTermCount extends LuceneTestCase { 
+  Directory dir;
+  IndexReader reader;
+  /* expected uniqueTermCount values for our documents */
+  ArrayList<Integer> expected = new ArrayList<Integer>();
+  
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    dir = newDirectory();
+    IndexWriterConfig config = newIndexWriterConfig(TEST_VERSION_CURRENT, 
+                                                    new MockAnalyzer(random, MockTokenizer.SIMPLE, true)).setMergePolicy(newLogMergePolicy());
+    config.setSimilarity(new TestSimilarity());
+    RandomIndexWriter writer = new RandomIndexWriter(random, dir, config);
+    Document doc = new Document();
+    Field foo = newField("foo", "", Field.Store.NO, Field.Index.ANALYZED);
+    doc.add(foo);
+    for (int i = 0; i < 100; i++) {
+      foo.setValue(addValue());
+      writer.addDocument(doc);
+    }
+    reader = writer.getReader();
+    writer.close();
+  }
+  
+  @Override
+  public void tearDown() throws Exception {
+    reader.close();
+    dir.close();
+    super.tearDown();
+  }
+  
+  public void test() throws Exception {
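+    // TestSimilarity stored each document's unique term count directly in its
+    // norm byte, so the norms should match the counts recorded by addValue().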
+    byte fooNorms[] = reader.norms("foo");
+    for (int i = 0; i < reader.maxDoc(); i++)
+      assertEquals(expected.get(i).intValue(), fooNorms[i] & 0xff);
+  }
+
+  /**
+   * Makes a bunch of single-char tokens (so the number of unique terms is at most 26)
+   * and records the number of unique terms in expected, to be checked against the norm.
+   */
+  private String addValue() {
+    StringBuilder sb = new StringBuilder();
+    HashSet<String> terms = new HashSet<String>();
+    int num = _TestUtil.nextInt(random, 0, 255);
+    for (int i = 0; i < num; i++) {
+      sb.append(' ');
+      char term = (char) _TestUtil.nextInt(random, 'a', 'z');
+      sb.append(term);
+      terms.add("" + term);
+    }
+    expected.add(terms.size());
+    return sb.toString();
+  }
+  
+  /**
+   * Simple similarity that encodes the computed norm (the unique term count) directly as a byte
+   */
+  class TestSimilarity extends DefaultSimilarity {
+
+    @Override
+    public byte encodeNormValue(float f) {
+      return (byte) f;
+    }
+
+    @Override
+    public float computeNorm(String field, FieldInvertState state) {
+      return (float) state.getUniqueTermCount();
+    }
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/TestWordlistLoader.java b/lucene/backwards/src/test/org/apache/lucene/index/TestWordlistLoader.java
new file mode 100644
index 0000000..0730523
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/TestWordlistLoader.java
@@ -0,0 +1,81 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.StringReader;
+import java.util.HashSet;
+import java.util.Set;
+
+import org.apache.lucene.util.LuceneTestCase;
+
+import org.apache.lucene.analysis.WordlistLoader;
+
+public class TestWordlistLoader extends LuceneTestCase {
+
+  public void testWordlistLoading() throws IOException {
+    String s = "ONE\n  two \nthree";
+    HashSet<String> wordSet1 = WordlistLoader.getWordSet(new StringReader(s));
+    checkSet(wordSet1);
+    HashSet<String> wordSet2 = WordlistLoader.getWordSet(new BufferedReader(new StringReader(s)));
+    checkSet(wordSet2);
+  }
+
+  public void testComments() throws Exception {
+    String s = "ONE\n  two \nthree\n#comment";
+    HashSet<String> wordSet1 = WordlistLoader.getWordSet(new StringReader(s), "#");
+    checkSet(wordSet1);
+    assertFalse(wordSet1.contains("#comment"));
+    assertFalse(wordSet1.contains("comment"));
+  }
+
+
+  private void checkSet(HashSet<String> wordset) {
+    assertEquals(3, wordset.size());
+    assertTrue(wordset.contains("ONE"));		// case is not modified
+    assertTrue(wordset.contains("two"));		// surrounding whitespace is removed
+    assertTrue(wordset.contains("three"));
+    assertFalse(wordset.contains("four"));
+  }
+
+  /**
+   * Test stopwords in snowball format
+   */
+  public void testSnowballListLoading() throws IOException {
+    String s = 
+      "|comment\n" + // commented line
+      " |comment\n" + // commented line with leading whitespace
+      "\n" + // blank line
+      "  \t\n" + // line with only whitespace
+      " |comment | comment\n" + // commented line with comment
+      "ONE\n" + // stopword, in uppercase
+      "   two   \n" + // stopword with leading/trailing space
+      " three   four five \n" + // multiple stopwords
+      "six seven | comment\n"; //multiple stopwords + comment
+    Set<String> wordset = WordlistLoader.getSnowballWordSet(new StringReader(s));
+    assertEquals(7, wordset.size());
+    assertTrue(wordset.contains("ONE"));
+    assertTrue(wordset.contains("two"));
+    assertTrue(wordset.contains("three"));
+    assertTrue(wordset.contains("four"));
+    assertTrue(wordset.contains("five"));
+    assertTrue(wordset.contains("six"));
+    assertTrue(wordset.contains("seven"));
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/index.19.cfs.zip b/lucene/backwards/src/test/org/apache/lucene/index/index.19.cfs.zip
new file mode 100644
index 0000000..4fd9b32
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/index.19.cfs.zip
Binary files differ
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/index.19.nocfs.zip b/lucene/backwards/src/test/org/apache/lucene/index/index.19.nocfs.zip
new file mode 100644
index 0000000..e0d9142
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/index.19.nocfs.zip
Binary files differ
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/index.20.cfs.zip b/lucene/backwards/src/test/org/apache/lucene/index/index.20.cfs.zip
new file mode 100644
index 0000000..4b931ae
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/index.20.cfs.zip
Binary files differ
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/index.20.nocfs.zip b/lucene/backwards/src/test/org/apache/lucene/index/index.20.nocfs.zip
new file mode 100644
index 0000000..1275cdf
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/index.20.nocfs.zip
Binary files differ
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/index.21.cfs.zip b/lucene/backwards/src/test/org/apache/lucene/index/index.21.cfs.zip
new file mode 100644
index 0000000..473c138
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/index.21.cfs.zip
Binary files differ
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/index.21.nocfs.zip b/lucene/backwards/src/test/org/apache/lucene/index/index.21.nocfs.zip
new file mode 100644
index 0000000..d0582d0
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/index.21.nocfs.zip
Binary files differ
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/index.22.cfs.zip b/lucene/backwards/src/test/org/apache/lucene/index/index.22.cfs.zip
new file mode 100644
index 0000000..1236307
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/index.22.cfs.zip
Binary files differ
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/index.22.nocfs.zip b/lucene/backwards/src/test/org/apache/lucene/index/index.22.nocfs.zip
new file mode 100644
index 0000000..216ddf3
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/index.22.nocfs.zip
Binary files differ
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/index.23.cfs.zip b/lucene/backwards/src/test/org/apache/lucene/index/index.23.cfs.zip
new file mode 100644
index 0000000..b5fdeef
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/index.23.cfs.zip
Binary files differ
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/index.23.nocfs.zip b/lucene/backwards/src/test/org/apache/lucene/index/index.23.nocfs.zip
new file mode 100644
index 0000000..9137ae6
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/index.23.nocfs.zip
Binary files differ
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/index.24.cfs.zip b/lucene/backwards/src/test/org/apache/lucene/index/index.24.cfs.zip
new file mode 100644
index 0000000..2c666a9
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/index.24.cfs.zip
Binary files differ
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/index.24.nocfs.zip b/lucene/backwards/src/test/org/apache/lucene/index/index.24.nocfs.zip
new file mode 100644
index 0000000..c223875
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/index.24.nocfs.zip
Binary files differ
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/index.29.cfs.zip b/lucene/backwards/src/test/org/apache/lucene/index/index.29.cfs.zip
new file mode 100644
index 0000000..c694c78
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/index.29.cfs.zip
Binary files differ
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/index.29.nocfs.zip b/lucene/backwards/src/test/org/apache/lucene/index/index.29.nocfs.zip
new file mode 100644
index 0000000..298cab7
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/index.29.nocfs.zip
Binary files differ
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/index.30.cfs.zip b/lucene/backwards/src/test/org/apache/lucene/index/index.30.cfs.zip
new file mode 100644
index 0000000..d5978c8
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/index.30.cfs.zip
Binary files differ
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/index.30.nocfs.zip b/lucene/backwards/src/test/org/apache/lucene/index/index.30.nocfs.zip
new file mode 100644
index 0000000..28cd83b
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/index.30.nocfs.zip
Binary files differ
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/index.31.cfs.zip b/lucene/backwards/src/test/org/apache/lucene/index/index.31.cfs.zip
new file mode 100644
index 0000000..8f123a7
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/index.31.cfs.zip
Binary files differ
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/index.31.nocfs.zip b/lucene/backwards/src/test/org/apache/lucene/index/index.31.nocfs.zip
new file mode 100644
index 0000000..21434e1
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/index.31.nocfs.zip
Binary files differ
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/index.31.optimized.cfs.zip b/lucene/backwards/src/test/org/apache/lucene/index/index.31.optimized.cfs.zip
new file mode 100644
index 0000000..200c710
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/index.31.optimized.cfs.zip
Binary files differ
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/index.31.optimized.nocfs.zip b/lucene/backwards/src/test/org/apache/lucene/index/index.31.optimized.nocfs.zip
new file mode 100644
index 0000000..9a158f1
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/index.31.optimized.nocfs.zip
Binary files differ
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/index.32.cfs.zip b/lucene/backwards/src/test/org/apache/lucene/index/index.32.cfs.zip
new file mode 100644
index 0000000..eff3153
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/index.32.cfs.zip
Binary files differ
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/index.32.nocfs.zip b/lucene/backwards/src/test/org/apache/lucene/index/index.32.nocfs.zip
new file mode 100644
index 0000000..0b345da
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/index.32.nocfs.zip
Binary files differ
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/wordliststopwords.txt b/lucene/backwards/src/test/org/apache/lucene/index/wordliststopwords.txt
new file mode 100644
index 0000000..7d35507
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/wordliststopwords.txt
@@ -0,0 +1,5 @@
+#comment
+ONE
+two
+#comment
+three
diff --git a/lucene/backwards/src/test/org/apache/lucene/index/wordliststopwords_nocomment.txt b/lucene/backwards/src/test/org/apache/lucene/index/wordliststopwords_nocomment.txt
new file mode 100644
index 0000000..59cb04e
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/index/wordliststopwords_nocomment.txt
@@ -0,0 +1,3 @@
+ONE
+two
+three
diff --git a/lucene/backwards/src/test/org/apache/lucene/messages/MessagesTestBundle.java b/lucene/backwards/src/test/org/apache/lucene/messages/MessagesTestBundle.java
new file mode 100644
index 0000000..d12b6d5
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/messages/MessagesTestBundle.java
@@ -0,0 +1,40 @@
+package org.apache.lucene.messages;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+public class MessagesTestBundle extends NLS {
+
+  private static final String BUNDLE_NAME = MessagesTestBundle.class.getName();
+
+  private MessagesTestBundle() {
+    // should never be instantiated
+  }
+
+  static {
+    // register all string ids with NLS class and initialize static string
+    // values
+    NLS.initializeMessages(BUNDLE_NAME, MessagesTestBundle.class);
+  }
+
+  // static field names must match the message keys in the property files.
+  public static String Q0001E_INVALID_SYNTAX;
+  public static String Q0004E_INVALID_SYNTAX_ESCAPE_UNICODE_TRUNCATION;
+
+  // this message is missing from the properties file
+  public static String Q0005E_MESSAGE_NOT_IN_BUNDLE;
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/messages/MessagesTestBundle.properties b/lucene/backwards/src/test/org/apache/lucene/messages/MessagesTestBundle.properties
new file mode 100644
index 0000000..870ff73
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/messages/MessagesTestBundle.properties
@@ -0,0 +1,3 @@
+Q0001E_INVALID_SYNTAX = Syntax Error: {0}
+
+Q0004E_INVALID_SYNTAX_ESCAPE_UNICODE_TRUNCATION = Truncated unicode escape sequence.
diff --git a/lucene/backwards/src/test/org/apache/lucene/messages/MessagesTestBundle_ja.properties b/lucene/backwards/src/test/org/apache/lucene/messages/MessagesTestBundle_ja.properties
new file mode 100644
index 0000000..2235294
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/messages/MessagesTestBundle_ja.properties
@@ -0,0 +1,3 @@
+Q0001E_INVALID_SYNTAX = \u69cb\u6587\u30a8\u30e9\u30fc: {0}
+
+Q0004E_INVALID_SYNTAX_ESCAPE_UNICODE_TRUNCATION = \u5207\u308a\u6368\u3066\u3089\u308c\u305f\u30e6\u30cb\u30b3\u30fc\u30c9\u30fb\u30a8\u30b9\u30b1\u30fc\u30d7\u30fb\u30b7\u30fc\u30b1\u30f3\u30b9\u3002
diff --git a/lucene/backwards/src/test/org/apache/lucene/messages/TestNLS.java b/lucene/backwards/src/test/org/apache/lucene/messages/TestNLS.java
new file mode 100644
index 0000000..c9ea9c1
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/messages/TestNLS.java
@@ -0,0 +1,106 @@
+package org.apache.lucene.messages;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Locale;
+
+import org.apache.lucene.util.LuceneTestCase;
+
+/**
+ */
+public class TestNLS extends LuceneTestCase {
+  public void testMessageLoading() {
+    Message invalidSyntax = new MessageImpl(
+        MessagesTestBundle.Q0001E_INVALID_SYNTAX, "XXX");
+    /* 
+     * if the default locale is ja, you get ja as a fallback:
+     * see ResourceBundle.html#getBundle(java.lang.String, java.util.Locale, java.lang.ClassLoader)
+     */
+    if (!Locale.getDefault().getLanguage().equals("ja"))
+      assertEquals("Syntax Error: XXX", invalidSyntax.getLocalizedMessage(Locale.ENGLISH));
+  }
+
+  public void testMessageLoading_ja() {
+    Message invalidSyntax = new MessageImpl(
+        MessagesTestBundle.Q0001E_INVALID_SYNTAX, "XXX");
+    assertEquals("構文エラー: XXX", invalidSyntax
+        .getLocalizedMessage(Locale.JAPANESE));
+  }
+
+  public void testNLSLoading() {
+    String message = NLS
+        .getLocalizedMessage(MessagesTestBundle.Q0004E_INVALID_SYNTAX_ESCAPE_UNICODE_TRUNCATION, Locale.ENGLISH);
+    /* 
+     * if the default locale is ja, you get ja as a fallback:
+     * see ResourceBundle.html#getBundle(java.lang.String, java.util.Locale, java.lang.ClassLoader)
+     */
+    if (!Locale.getDefault().getLanguage().equals("ja"))
+      assertEquals("Truncated unicode escape sequence.", message);
+
+    message = NLS.getLocalizedMessage(MessagesTestBundle.Q0001E_INVALID_SYNTAX, Locale.ENGLISH,
+        "XXX");
+    /* 
+     * if the default locale is ja, you get ja as a fallback:
+     * see ResourceBundle.html#getBundle(java.lang.String, java.util.Locale, java.lang.ClassLoader)
+     */
+    if (!Locale.getDefault().getLanguage().equals("ja"))
+      assertEquals("Syntax Error: XXX", message);
+  }
+
+  public void testNLSLoading_ja() {
+    String message = NLS.getLocalizedMessage(
+        MessagesTestBundle.Q0004E_INVALID_SYNTAX_ESCAPE_UNICODE_TRUNCATION,
+        Locale.JAPANESE);
+    assertEquals("切り捨てられたユニコード・エスケープ・シーケンス。", message);
+
+    message = NLS.getLocalizedMessage(MessagesTestBundle.Q0001E_INVALID_SYNTAX,
+        Locale.JAPANESE, "XXX");
+    assertEquals("構文エラー: XXX", message);
+  }
+
+  public void testNLSLoading_xx_XX() {
+    Locale locale = new Locale("xx", "XX", "");
+    String message = NLS.getLocalizedMessage(
+        MessagesTestBundle.Q0004E_INVALID_SYNTAX_ESCAPE_UNICODE_TRUNCATION,
+        locale);
+    /* 
+     * if the default locale is ja, you get ja as a fallback:
+     * see ResourceBundle.html#getBundle(java.lang.String, java.util.Locale, java.lang.ClassLoader)
+     */
+    if (!Locale.getDefault().getLanguage().equals("ja"))
+      assertEquals("Truncated unicode escape sequence.", message);
+
+    message = NLS.getLocalizedMessage(MessagesTestBundle.Q0001E_INVALID_SYNTAX,
+        locale, "XXX");
+    /* 
+     * if the default locale is ja, you get ja as a fallback:
+     * see ResourceBundle.html#getBundle(java.lang.String, java.util.Locale, java.lang.ClassLoader)
+     */
+    if (!Locale.getDefault().getLanguage().equals("ja"))
+      assertEquals("Syntax Error: XXX", message);
+  }
+
+  public void testMissingMessage() {
+    Locale locale = Locale.ENGLISH;
+    String message = NLS.getLocalizedMessage(
+        MessagesTestBundle.Q0005E_MESSAGE_NOT_IN_BUNDLE, locale);
+
+    assertEquals("Message with key:Q0005E_MESSAGE_NOT_IN_BUNDLE and locale: "
+        + locale.toString() + " not found.", message);
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/queryParser/TestMultiAnalyzer.java b/lucene/backwards/src/test/org/apache/lucene/queryParser/TestMultiAnalyzer.java
new file mode 100644
index 0000000..6900e5c
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/queryParser/TestMultiAnalyzer.java
@@ -0,0 +1,286 @@
+package org.apache.lucene.queryParser;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.io.Reader;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.BaseTokenStreamTestCase;
+import org.apache.lucene.analysis.LowerCaseFilter;
+import org.apache.lucene.analysis.TokenFilter;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.standard.StandardTokenizer;
+import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
+import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.util.Version;
+
+/**
+ * Test QueryParser's ability to deal with Analyzers that return more
+ * than one token per position or that return tokens with a position
+ * increment &gt; 1.
+ *
+ */
+public class TestMultiAnalyzer extends BaseTokenStreamTestCase {
+
+  private static int multiToken = 0;
+
+  public void testMultiAnalyzer() throws ParseException {
+    
+    QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "", new MultiAnalyzer());
+
+    // trivial, no multiple tokens:
+    assertEquals("foo", qp.parse("foo").toString());
+    assertEquals("foo", qp.parse("\"foo\"").toString());
+    assertEquals("foo foobar", qp.parse("foo foobar").toString());
+    assertEquals("\"foo foobar\"", qp.parse("\"foo foobar\"").toString());
+    assertEquals("\"foo foobar blah\"", qp.parse("\"foo foobar blah\"").toString());
+
+    // two tokens at the same position:
+    assertEquals("(multi multi2) foo", qp.parse("multi foo").toString());
+    assertEquals("foo (multi multi2)", qp.parse("foo multi").toString());
+    assertEquals("(multi multi2) (multi multi2)", qp.parse("multi multi").toString());
+    assertEquals("+(foo (multi multi2)) +(bar (multi multi2))",
+        qp.parse("+(foo multi) +(bar multi)").toString());
+    assertEquals("+(foo (multi multi2)) field:\"bar (multi multi2)\"",
+        qp.parse("+(foo multi) field:\"bar multi\"").toString());
+
+    // phrases:
+    assertEquals("\"(multi multi2) foo\"", qp.parse("\"multi foo\"").toString());
+    assertEquals("\"foo (multi multi2)\"", qp.parse("\"foo multi\"").toString());
+    assertEquals("\"foo (multi multi2) foobar (multi multi2)\"",
+        qp.parse("\"foo multi foobar multi\"").toString());
+
+    // fields:
+    assertEquals("(field:multi field:multi2) field:foo", qp.parse("field:multi field:foo").toString());
+    assertEquals("field:\"(multi multi2) foo\"", qp.parse("field:\"multi foo\"").toString());
+
+    // three tokens at one position:
+    assertEquals("triplemulti multi3 multi2", qp.parse("triplemulti").toString());
+    assertEquals("foo (triplemulti multi3 multi2) foobar",
+        qp.parse("foo triplemulti foobar").toString());
+
+    // phrase with non-default slop:
+    assertEquals("\"(multi multi2) foo\"~10", qp.parse("\"multi foo\"~10").toString());
+
+    // phrase with non-default boost:
+    assertEquals("\"(multi multi2) foo\"^2.0", qp.parse("\"multi foo\"^2").toString());
+
+    // phrase after changing default slop
+    qp.setPhraseSlop(99);
+    assertEquals("\"(multi multi2) foo\"~99 bar",
+                 qp.parse("\"multi foo\" bar").toString());
+    assertEquals("\"(multi multi2) foo\"~99 \"foo bar\"~2",
+                 qp.parse("\"multi foo\" \"foo bar\"~2").toString());
+    qp.setPhraseSlop(0);
+
+    // non-default operator:
+    qp.setDefaultOperator(QueryParser.AND_OPERATOR);
+    assertEquals("+(multi multi2) +foo", qp.parse("multi foo").toString());
+
+  }
+    
+  public void testMultiAnalyzerWithSubclassOfQueryParser() throws ParseException {
+
+    DumbQueryParser qp = new DumbQueryParser("", new MultiAnalyzer());
+    qp.setPhraseSlop(99); // modified default slop
+
+    // direct call to (super's) getFieldQuery to demonstrate difference
+    // between phrase and multiphrase with modified default slop
+    assertEquals("\"foo bar\"~99",
+                 qp.getSuperFieldQuery("","foo bar", true).toString());
+    assertEquals("\"(multi multi2) bar\"~99",
+                 qp.getSuperFieldQuery("","multi bar", true).toString());
+
+    
+    // ask subclass to parse phrase with modified default slop
+    assertEquals("\"(multi multi2) foo\"~99 bar",
+                 qp.parse("\"multi foo\" bar").toString());
+    
+  }
+    
+  public void testPosIncrementAnalyzer() throws ParseException {
+    QueryParser qp = new QueryParser(Version.LUCENE_24, "", new PosIncrementAnalyzer());
+    assertEquals("quick brown", qp.parse("the quick brown").toString());
+    assertEquals("\"quick brown\"", qp.parse("\"the quick brown\"").toString());
+    assertEquals("quick brown fox", qp.parse("the quick brown fox").toString());
+    assertEquals("\"quick brown fox\"", qp.parse("\"the quick brown fox\"").toString());
+  }
+  
+  /**
+   * Expands "multi" to "multi" and "multi2", both at the same position,
+   * and expands "triplemulti" to "triplemulti", "multi3", and "multi2".  
+   */
+  private class MultiAnalyzer extends Analyzer {
+
+    public MultiAnalyzer() {
+    }
+
+    @Override
+    public TokenStream tokenStream(String fieldName, Reader reader) {
+      TokenStream result = new StandardTokenizer(TEST_VERSION_CURRENT, reader);
+      result = new TestFilter(result);
+      result = new LowerCaseFilter(TEST_VERSION_CURRENT, result);
+      return result;
+    }
+  }
+
+  private final class TestFilter extends TokenFilter {
+    
+    private String prevType;
+    private int prevStartOffset;
+    private int prevEndOffset;
+    
+    private final CharTermAttribute termAtt;
+    private final PositionIncrementAttribute posIncrAtt;
+    private final OffsetAttribute offsetAtt;
+    private final TypeAttribute typeAtt;
+    
+    public TestFilter(TokenStream in) {
+      super(in);
+      termAtt = addAttribute(CharTermAttribute.class);
+      posIncrAtt = addAttribute(PositionIncrementAttribute.class);
+      offsetAtt = addAttribute(OffsetAttribute.class);
+      typeAtt = addAttribute(TypeAttribute.class);
+    }
+
+    @Override
+    public final boolean incrementToken() throws java.io.IOException {
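+      // While multiToken > 0, emit synthetic "multiN" tokens at the same position
+      // (position increment 0), reusing the previous real token's offsets and type.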
+      if (multiToken > 0) {
+        termAtt.setEmpty().append("multi"+(multiToken+1));
+        offsetAtt.setOffset(prevStartOffset, prevEndOffset);
+        typeAtt.setType(prevType);
+        posIncrAtt.setPositionIncrement(0);
+        multiToken--;
+        return true;
+      } else {
+        boolean next = input.incrementToken();
+        if (!next) {
+          return false;
+        }
+        prevType = typeAtt.type();
+        prevStartOffset = offsetAtt.startOffset();
+        prevEndOffset = offsetAtt.endOffset();
+        String text = termAtt.toString();
+        if (text.equals("triplemulti")) {
+          multiToken = 2;
+          return true;
+        } else if (text.equals("multi")) {
+          multiToken = 1;
+          return true;
+        } else {
+          return true;
+        }
+      }
+    }
+
+    public void reset() throws IOException {
+      super.reset();
+      this.prevType = null;
+      this.prevStartOffset = 0;
+      this.prevEndOffset = 0;
+    }
+  }
+
+  /**
+   * Analyzes "the quick brown" as: quick(incr=2) brown(incr=1).
+   * Does not work correctly for input other than "the quick brown ...".
+   */
+  private class PosIncrementAnalyzer extends Analyzer {
+
+    public PosIncrementAnalyzer() {
+    }
+
+    @Override
+    public TokenStream tokenStream(String fieldName, Reader reader) {
+      TokenStream result = new StandardTokenizer(TEST_VERSION_CURRENT, reader);
+      result = new TestPosIncrementFilter(result);
+      result = new LowerCaseFilter(TEST_VERSION_CURRENT, result);
+      return result;
+    }
+  }
+
+  private final class TestPosIncrementFilter extends TokenFilter {
+    
+    CharTermAttribute termAtt;
+    PositionIncrementAttribute posIncrAtt;
+    
+    public TestPosIncrementFilter(TokenStream in) {
+      super(in);
+      termAtt = addAttribute(CharTermAttribute.class);
+      posIncrAtt = addAttribute(PositionIncrementAttribute.class);
+    }
+
+    @Override
+    public final boolean incrementToken () throws java.io.IOException {
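+      // Drop "the" entirely and give "quick" a position increment of 2, leaving a
+      // visible gap where the stopword was removed.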
+      while(input.incrementToken()) {
+        if (termAtt.toString().equals("the")) {
+          // stopword, do nothing
+        } else if (termAtt.toString().equals("quick")) {
+          posIncrAtt.setPositionIncrement(2);
+          return true;
+        } else {
+          posIncrAtt.setPositionIncrement(1);
+          return true;
+        }
+      }
+      return false;
+    }
+  }
+
+    /** a very simple subclass of QueryParser */
+    private final static class DumbQueryParser extends QueryParser {
+        
+        public DumbQueryParser(String f, Analyzer a) {
+            super(TEST_VERSION_CURRENT, f, a);
+        }
+
+        /** expose super's version */
+        public Query getSuperFieldQuery(String f, String t, boolean quoted) 
+            throws ParseException {
+            return super.getFieldQuery(f,t,quoted);
+        }
+        /** wrap super's version */
+        @Override
+        protected Query getFieldQuery(String f, String t, boolean quoted)
+            throws ParseException {
+            return new DumbQueryWrapper(getSuperFieldQuery(f,t,quoted));
+        }
+    }
+    
+    /**
+     * A very simple wrapper that defeats instanceof checks on the parsed query
+     * while delegating toString to the query it wraps.
+     */
+    private final static class DumbQueryWrapper extends Query {
+
+        private Query q;
+        public DumbQueryWrapper(Query q) {
+            super();
+            this.q = q;
+        }
+        @Override
+        public String toString(String f) {
+            return q.toString(f);
+        }
+    }
+    
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/queryParser/TestMultiFieldQueryParser.java b/lucene/backwards/src/test/org/apache/lucene/queryParser/TestMultiFieldQueryParser.java
new file mode 100644
index 0000000..9aea4db
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/queryParser/TestMultiFieldQueryParser.java
@@ -0,0 +1,329 @@
+package org.apache.lucene.queryParser;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.io.Reader;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.search.BooleanClause;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.BooleanClause.Occur;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+
+/**
+ * Tests MultiFieldQueryParser.
+ */
+public class TestMultiFieldQueryParser extends LuceneTestCase {
+
+  /** test stop-word parsing for both the non-static form, and for the 
+   * corresponding static form (qtxt, fields[]). */
+  public void testStopwordsParsing() throws Exception {
+    assertStopQueryEquals("one", "b:one t:one");  
+    assertStopQueryEquals("one stop", "b:one t:one");  
+    assertStopQueryEquals("one (stop)", "b:one t:one");  
+    assertStopQueryEquals("one ((stop))", "b:one t:one");  
+    assertStopQueryEquals("stop", "");  
+    assertStopQueryEquals("(stop)", "");  
+    assertStopQueryEquals("((stop))", "");  
+  }
+
+  // verify parsing of query using a stopping analyzer  
+  private void assertStopQueryEquals (String qtxt, String expectedRes) throws Exception {
+    String[] fields = {"b", "t"};
+    Occur occur[] = {Occur.SHOULD, Occur.SHOULD};
+    TestQueryParser.QPTestAnalyzer a = new TestQueryParser.QPTestAnalyzer();
+    MultiFieldQueryParser mfqp = new MultiFieldQueryParser(TEST_VERSION_CURRENT, fields, a);
+    
+    Query q = mfqp.parse(qtxt);
+    assertEquals(expectedRes, q.toString());
+    
+    q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, qtxt, fields, occur, a);
+    assertEquals(expectedRes, q.toString());
+  }
+  
+  public void testSimple() throws Exception {
+    String[] fields = {"b", "t"};
+    MultiFieldQueryParser mfqp = new MultiFieldQueryParser(TEST_VERSION_CURRENT, fields, new MockAnalyzer(random));
+    
+    Query q = mfqp.parse("one");
+    assertEquals("b:one t:one", q.toString());
+    
+    q = mfqp.parse("one two");
+    assertEquals("(b:one t:one) (b:two t:two)", q.toString());
+    
+    q = mfqp.parse("+one +two");
+    assertEquals("+(b:one t:one) +(b:two t:two)", q.toString());
+
+    q = mfqp.parse("+one -two -three");
+    assertEquals("+(b:one t:one) -(b:two t:two) -(b:three t:three)", q.toString());
+    
+    q = mfqp.parse("one^2 two");
+    assertEquals("((b:one t:one)^2.0) (b:two t:two)", q.toString());
+
+    q = mfqp.parse("one~ two");
+    assertEquals("(b:one~0.5 t:one~0.5) (b:two t:two)", q.toString());
+
+    q = mfqp.parse("one~0.8 two^2");
+    assertEquals("(b:one~0.8 t:one~0.8) ((b:two t:two)^2.0)", q.toString());
+
+    q = mfqp.parse("one* two*");
+    assertEquals("(b:one* t:one*) (b:two* t:two*)", q.toString());
+
+    q = mfqp.parse("[a TO c] two");
+    assertEquals("(b:[a TO c] t:[a TO c]) (b:two t:two)", q.toString());
+
+    q = mfqp.parse("w?ldcard");
+    assertEquals("b:w?ldcard t:w?ldcard", q.toString());
+
+    q = mfqp.parse("\"foo bar\"");
+    assertEquals("b:\"foo bar\" t:\"foo bar\"", q.toString());
+
+    q = mfqp.parse("\"aa bb cc\" \"dd ee\"");
+    assertEquals("(b:\"aa bb cc\" t:\"aa bb cc\") (b:\"dd ee\" t:\"dd ee\")", q.toString());
+
+    q = mfqp.parse("\"foo bar\"~4");
+    assertEquals("b:\"foo bar\"~4 t:\"foo bar\"~4", q.toString());
+
+    // LUCENE-1213: MultiFieldQueryParser was ignoring slop when phrase had a field.
+    q = mfqp.parse("b:\"foo bar\"~4"); 
+    assertEquals("b:\"foo bar\"~4", q.toString());
+
+    // make sure that terms which have a field are not touched:
+    q = mfqp.parse("one f:two");
+    assertEquals("(b:one t:one) f:two", q.toString());
+
+    // AND mode:
+    mfqp.setDefaultOperator(QueryParser.AND_OPERATOR);
+    q = mfqp.parse("one two");
+    assertEquals("+(b:one t:one) +(b:two t:two)", q.toString());
+    q = mfqp.parse("\"aa bb cc\" \"dd ee\"");
+    assertEquals("+(b:\"aa bb cc\" t:\"aa bb cc\") +(b:\"dd ee\" t:\"dd ee\")", q.toString());
+
+  }
+  
+  public void testBoostsSimple() throws Exception {
+      Map<String,Float> boosts = new HashMap<String,Float>();
+      boosts.put("b", Float.valueOf(5));
+      boosts.put("t", Float.valueOf(10));
+      String[] fields = {"b", "t"};
+      MultiFieldQueryParser mfqp = new MultiFieldQueryParser(TEST_VERSION_CURRENT, fields, new MockAnalyzer(random), boosts);
+      
+      
+      //Check for simple
+      Query q = mfqp.parse("one");
+      assertEquals("b:one^5.0 t:one^10.0", q.toString());
+      
+      //Check for AND
+      q = mfqp.parse("one AND two");
+      assertEquals("+(b:one^5.0 t:one^10.0) +(b:two^5.0 t:two^10.0)", q.toString());
+      
+      //Check for OR
+      q = mfqp.parse("one OR two");
+      assertEquals("(b:one^5.0 t:one^10.0) (b:two^5.0 t:two^10.0)", q.toString());
+      
+      //Check for AND and a field
+      q = mfqp.parse("one AND two AND foo:test");
+      assertEquals("+(b:one^5.0 t:one^10.0) +(b:two^5.0 t:two^10.0) +foo:test", q.toString());
+      
+      q = mfqp.parse("one^3 AND two^4");
+      assertEquals("+((b:one^5.0 t:one^10.0)^3.0) +((b:two^5.0 t:two^10.0)^4.0)", q.toString());
+  }
+
+  public void testStaticMethod1() throws ParseException {
+    String[] fields = {"b", "t"};
+    String[] queries = {"one", "two"};
+    Query q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries, fields, new MockAnalyzer(random));
+    assertEquals("b:one t:two", q.toString());
+
+    String[] queries2 = {"+one", "+two"};
+    q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries2, fields, new MockAnalyzer(random));
+    assertEquals("(+b:one) (+t:two)", q.toString());
+
+    String[] queries3 = {"one", "+two"};
+    q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries3, fields, new MockAnalyzer(random));
+    assertEquals("b:one (+t:two)", q.toString());
+
+    String[] queries4 = {"one +more", "+two"};
+    q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries4, fields, new MockAnalyzer(random));
+    assertEquals("(b:one +b:more) (+t:two)", q.toString());
+
+    String[] queries5 = {"blah"};
+    try {
+      q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries5, fields, new MockAnalyzer(random));
+      fail();
+    } catch(IllegalArgumentException e) {
+      // expected exception, array length differs
+    }
+    
+    // check also with stop words for this static form (qtxts[], fields[]).
+    TestQueryParser.QPTestAnalyzer stopA = new TestQueryParser.QPTestAnalyzer();
+    
+    String[] queries6 = {"((+stop))", "+((stop))"};
+    q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries6, fields, stopA);
+    assertEquals("", q.toString());
+    
+    String[] queries7 = {"one ((+stop)) +more", "+((stop)) +two"};
+    q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries7, fields, stopA);
+    assertEquals("(b:one +b:more) (+t:two)", q.toString());
+
+  }
+
+  public void testStaticMethod2() throws ParseException {
+    String[] fields = {"b", "t"};
+    BooleanClause.Occur[] flags = {BooleanClause.Occur.MUST, BooleanClause.Occur.MUST_NOT};
+    Query q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, "one", fields, flags, new MockAnalyzer(random));
+    assertEquals("+b:one -t:one", q.toString());
+
+    q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, "one two", fields, flags, new MockAnalyzer(random));
+    assertEquals("+(b:one b:two) -(t:one t:two)", q.toString());
+
+    try {
+      BooleanClause.Occur[] flags2 = {BooleanClause.Occur.MUST};
+      q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, "blah", fields, flags2, new MockAnalyzer(random));
+      fail();
+    } catch(IllegalArgumentException e) {
+      // expected exception, array length differs
+    }
+  }
+
+  public void testStaticMethod2Old() throws ParseException {
+    String[] fields = {"b", "t"};
+    //int[] flags = {MultiFieldQueryParser.REQUIRED_FIELD, MultiFieldQueryParser.PROHIBITED_FIELD};
+    BooleanClause.Occur[] flags = {BooleanClause.Occur.MUST, BooleanClause.Occur.MUST_NOT};
+
+    Query q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, "one", fields, flags, new MockAnalyzer(random));
+    assertEquals("+b:one -t:one", q.toString());
+
+    q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, "one two", fields, flags, new MockAnalyzer(random));
+    assertEquals("+(b:one b:two) -(t:one t:two)", q.toString());
+
+    try {
+      BooleanClause.Occur[] flags2 = {BooleanClause.Occur.MUST};
+      q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, "blah", fields, flags2, new MockAnalyzer(random));
+      fail();
+    } catch(IllegalArgumentException e) {
+      // expected exception, array length differs
+    }
+  }
+
+  public void testStaticMethod3() throws ParseException {
+    String[] queries = {"one", "two", "three"};
+    String[] fields = {"f1", "f2", "f3"};
+    BooleanClause.Occur[] flags = {BooleanClause.Occur.MUST,
+        BooleanClause.Occur.MUST_NOT, BooleanClause.Occur.SHOULD};
+    Query q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries, fields, flags, new MockAnalyzer(random));
+    assertEquals("+f1:one -f2:two f3:three", q.toString());
+
+    try {
+      BooleanClause.Occur[] flags2 = {BooleanClause.Occur.MUST};
+      q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries, fields, flags2, new MockAnalyzer(random));
+      fail();
+    } catch(IllegalArgumentException e) {
+      // expected exception, array length differs
+    }
+  }
+
+  public void testStaticMethod3Old() throws ParseException {
+    String[] queries = {"one", "two"};
+    String[] fields = {"b", "t"};
+    BooleanClause.Occur[] flags = {BooleanClause.Occur.MUST, BooleanClause.Occur.MUST_NOT};
+    Query q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries, fields, flags, new MockAnalyzer(random));
+    assertEquals("+b:one -t:two", q.toString());
+
+    try {
+      BooleanClause.Occur[] flags2 = {BooleanClause.Occur.MUST};
+      q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries, fields, flags2, new MockAnalyzer(random));
+      fail();
+    } catch(IllegalArgumentException e) {
+      // expected exception, array length differs
+    }
+  }
+
+  public void testAnalyzerReturningNull() throws ParseException {
+    String[] fields = new String[] { "f1", "f2", "f3" };
+    MultiFieldQueryParser parser = new MultiFieldQueryParser(TEST_VERSION_CURRENT, fields, new AnalyzerReturningNull());
+    Query q = parser.parse("bla AND blo");
+    assertEquals("+(f2:bla f3:bla) +(f2:blo f3:blo)", q.toString());
+    // the following queries are not affected as their terms are not analyzed anyway:
+    q = parser.parse("bla*");
+    assertEquals("f1:bla* f2:bla* f3:bla*", q.toString());
+    q = parser.parse("bla~");
+    assertEquals("f1:bla~0.5 f2:bla~0.5 f3:bla~0.5", q.toString());
+    q = parser.parse("[a TO c]");
+    assertEquals("f1:[a TO c] f2:[a TO c] f3:[a TO c]", q.toString());
+  }
+
+  public void testStopWordSearching() throws Exception {
+    Analyzer analyzer = new MockAnalyzer(random);
+    Directory ramDir = newDirectory();
+    IndexWriter iw =  new IndexWriter(ramDir, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
+    Document doc = new Document();
+    doc.add(newField("body", "blah the footest blah", Field.Store.NO, Field.Index.ANALYZED));
+    iw.addDocument(doc);
+    iw.close();
+    
+    MultiFieldQueryParser mfqp = 
+      new MultiFieldQueryParser(TEST_VERSION_CURRENT, new String[] {"body"}, analyzer);
+    mfqp.setDefaultOperator(QueryParser.Operator.AND);
+    Query q = mfqp.parse("the footest");
+    IndexSearcher is = new IndexSearcher(ramDir, true);
+    ScoreDoc[] hits = is.search(q, null, 1000).scoreDocs;
+    assertEquals(1, hits.length);
+    is.close();
+    ramDir.close();
+  }
+  
+  /**
+   * Analyzer that produces an empty token stream for field "f1" and delegates
+   * to a MockAnalyzer for all other fields.
+   */
+  private static class AnalyzerReturningNull extends Analyzer {
+    MockAnalyzer stdAnalyzer = new MockAnalyzer(random);
+
+    public AnalyzerReturningNull() {
+    }
+
+    @Override
+    public TokenStream tokenStream(String fieldName, Reader reader) {
+      if ("f1".equals(fieldName)) {
+        return new EmptyTokenStream();
+      } else {
+        return stdAnalyzer.tokenStream(fieldName, reader);
+      }
+    }
+
+    private static class EmptyTokenStream extends TokenStream {
+      @Override
+      public boolean incrementToken() throws IOException {
+        return false;
+      }
+    }
+  }
+
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/queryParser/TestQueryParser.java b/lucene/backwards/src/test/org/apache/lucene/queryParser/TestQueryParser.java
new file mode 100644
index 0000000..7833a7b
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/queryParser/TestQueryParser.java
@@ -0,0 +1,37 @@
+package org.apache.lucene.queryParser;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.MockTokenizer;
+
+/**
+ * Tests QueryParser.
+ */
+public class TestQueryParser extends QueryParserTestBase {
+
+  public QueryParser getParser(Analyzer a) throws Exception {
+    if (a == null)
+      a = new MockAnalyzer(random, MockTokenizer.SIMPLE, true);
+    QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", a);
+    qp.setDefaultOperator(QueryParser.OR_OPERATOR);
+    return qp;
+  }
+
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/BaseTestRangeFilter.java b/lucene/backwards/src/test/org/apache/lucene/search/BaseTestRangeFilter.java
new file mode 100644
index 0000000..1b70095
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/BaseTestRangeFilter.java
@@ -0,0 +1,189 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.Random;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util._TestUtil;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+public class BaseTestRangeFilter extends LuceneTestCase {
+  
+  public static final boolean F = false;
+  public static final boolean T = true;
+  
+  /**
+   * Collation interacts badly with hyphens -- collation produces different
+   * ordering than Unicode code-point ordering -- so two indexes are created:
+   * one which can't have negative random integers, for testing collated ranges,
+   * and the other which can have negative random integers, for all other tests.
+   */
+  static class TestIndex {
+    int maxR;
+    int minR;
+    boolean allowNegativeRandomInts;
+    Directory index;
+    
+    TestIndex(Random random, int minR, int maxR, boolean allowNegativeRandomInts) {
+      this.minR = minR;
+      this.maxR = maxR;
+      this.allowNegativeRandomInts = allowNegativeRandomInts;
+      try {
+        index = newDirectory(random);
+      } catch (IOException e) { throw new RuntimeException(e); }
+    }
+  }
+  
+  static IndexReader signedIndexReader;
+  static IndexReader unsignedIndexReader;
+  
+  static TestIndex signedIndexDir;
+  static TestIndex unsignedIndexDir;
+  
+  static int minId = 0;
+  static int maxId;
+  
+  static final int intLength = Integer.toString(Integer.MAX_VALUE).length();
+  
+  /**
+   * a simple padding function that should work with any int
+   */
+  public static String pad(int n) {
+    StringBuilder b = new StringBuilder(40);
+    String p = "0";
+    if (n < 0) {
+      p = "-";
+      n = Integer.MAX_VALUE + n + 1;
+    }
+    b.append(p);
+    String s = Integer.toString(n);
+    for (int i = s.length(); i <= intLength; i++) {
+      b.append("0");
+    }
+    b.append(s);
+    
+    return b.toString();
+  }
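+
+  // Editorial note (illustrative, not part of the original test): pad() keeps
+  // lexicographic ordering consistent with numeric ordering by remapping
+  // negative values into the positive range and zero-padding to a fixed width.
+  // For example, with intLength == 10 as computed above:
+  //   pad(3)  -> "000000000003"
+  //   pad(-3) -> "-02147483645"   // -3 is remapped to Integer.MAX_VALUE - 2
+  // All outputs have the same length, and '-' sorts before '0', so
+  // String.compareTo() orders padded values the way testPad() below checks.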
+  
+  @BeforeClass
+  public static void beforeClassBaseTestRangeFilter() throws Exception {
+    maxId = atLeast(500);
+    signedIndexDir = new TestIndex(random, Integer.MAX_VALUE, Integer.MIN_VALUE, true);
+    unsignedIndexDir = new TestIndex(random, Integer.MAX_VALUE, 0, false);
+    signedIndexReader = build(random, signedIndexDir);
+    unsignedIndexReader = build(random, unsignedIndexDir);
+  }
+  
+  @AfterClass
+  public static void afterClassBaseTestRangeFilter() throws Exception {
+    signedIndexReader.close();
+    unsignedIndexReader.close();
+    signedIndexDir.index.close();
+    unsignedIndexDir.index.close();
+    signedIndexReader = null;
+    unsignedIndexReader = null;
+    signedIndexDir = null;
+    unsignedIndexDir = null;
+  }
+  
+  private static IndexReader build(Random random, TestIndex index) throws IOException {
+    /* build an index */
+
+    Document doc = new Document();
+    Field idField = newField(random, "id", "", Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS);
+    Field randField = newField(random, "rand", "", Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS);
+    Field bodyField = newField(random, "body", "", Field.Store.NO, Field.Index.NOT_ANALYZED_NO_NORMS);
+    doc.add(idField);
+    doc.add(randField);
+    doc.add(bodyField);
+
+    RandomIndexWriter writer = new RandomIndexWriter(random, index.index, 
+                                                     newIndexWriterConfig(random, TEST_VERSION_CURRENT, new MockAnalyzer(random))
+                                                     .setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(_TestUtil.nextInt(random, 50, 1000)).setMergePolicy(newLogMergePolicy()));
+    _TestUtil.reduceOpenFiles(writer.w);
+    while(true) {
+
+      int minCount = 0;
+      int maxCount = 0;
+
+      for (int d = minId; d <= maxId; d++) {
+        idField.setValue(pad(d));
+        int r = index.allowNegativeRandomInts ? random.nextInt() : random
+          .nextInt(Integer.MAX_VALUE);
+        if (index.maxR < r) {
+          index.maxR = r;
+          maxCount = 1;
+        } else if (index.maxR == r) {
+          maxCount++;
+        }
+
+        if (r < index.minR) {
+          index.minR = r;
+          minCount = 1;
+        } else if (r == index.minR) {
+          minCount++;
+        }
+        randField.setValue(pad(r));
+        bodyField.setValue("body");
+        writer.addDocument(doc);
+      }
+
+      if (minCount == 1 && maxCount == 1) {
+        // our subclasses rely on only 1 doc having the min or
+        // max, so we loop until we satisfy that.  it should be
+        // exceedingly rare (Yonik calculates 1 in ~429,000 times)
+        // that this loop requires more than one try:
+        IndexReader ir = writer.getReader();
+        writer.close();
+        return ir;
+      }
+
+      // try again
+      writer.deleteAll();
+    }
+  }
+  
+  @Test
+  public void testPad() {
+    
+    int[] tests = new int[] {-9999999, -99560, -100, -3, -1, 0, 3, 9, 10, 1000,
+        999999999};
+    for (int i = 0; i < tests.length - 1; i++) {
+      int a = tests[i];
+      int b = tests[i + 1];
+      String aa = pad(a);
+      String bb = pad(b);
+      String label = a + ":" + aa + " vs " + b + ":" + bb;
+      assertEquals("length of " + label, aa.length(), bb.length());
+      assertTrue("compare less than " + label, aa.compareTo(bb) < 0);
+    }
+    
+  }
+  
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/JustCompileSearch.java b/lucene/backwards/src/test/org/apache/lucene/search/JustCompileSearch.java
new file mode 100644
index 0000000..4ec4556
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/JustCompileSearch.java
@@ -0,0 +1,501 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.FieldSelector;
+import org.apache.lucene.index.CorruptIndexException;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.FieldInvertState;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.util.PriorityQueue;
+
+/**
+ * Holds all implementations of classes in the o.a.l.search package as a
+ * back-compatibility test. It does not run any tests per se; however, if
+ * someone adds a method to an interface or an abstract method to an abstract
+ * class, one of the implementations here will fail to compile, letting us know
+ * that the back-compat policy was violated.
+ */
+final class JustCompileSearch {
+
+  private static final String UNSUPPORTED_MSG = "unsupported: used for back-compat testing only !";
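+
+  // Editorial note: the classes below intentionally implement every abstract
+  // method with stubs that mostly just throw UnsupportedOperationException.
+  // They are never executed; if a new abstract method were added to, say,
+  // Collector or Weight, the corresponding JustCompile* class here would stop
+  // compiling, which is how a back-compat break is detected.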
+
+  static final class JustCompileSearcher extends Searcher {
+
+    @Override
+    public Weight createNormalizedWeight(Query query) throws IOException {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+    
+    @Override
+    public void close() throws IOException {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+
+    @Override
+    public Document doc(int i) throws CorruptIndexException, IOException {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+
+    @Override
+    public int[] docFreqs(Term[] terms) throws IOException {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+
+    @Override
+    public Explanation explain(Query query, int doc) throws IOException {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+
+    @Override
+    public Similarity getSimilarity() {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+
+    @Override
+    public void search(Query query, Collector results) throws IOException {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+
+    @Override
+    public void search(Query query, Filter filter, Collector results)
+        throws IOException {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+
+    @Override
+    public TopDocs search(Query query, Filter filter, int n) throws IOException {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+    
+    @Override
+    public TopFieldDocs search(Query query, Filter filter, int n, Sort sort)
+        throws IOException {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+    
+    @Override
+    public TopDocs search(Query query, int n) throws IOException {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+    
+    @Override
+    public void setSimilarity(Similarity similarity) {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+    
+    @Override
+    public int docFreq(Term term) throws IOException {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+
+    @Override
+    public Explanation explain(Weight weight, int doc) throws IOException {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+
+    @Override
+    public int maxDoc() throws IOException {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+
+    @Override
+    public Query rewrite(Query query) throws IOException {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+
+    @Override
+    public void search(Weight weight, Filter filter, Collector results)
+        throws IOException {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+
+    @Override
+    public TopDocs search(Weight weight, Filter filter, int n)
+        throws IOException {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+
+    @Override
+    public TopFieldDocs search(Weight weight, Filter filter, int n, Sort sort)
+        throws IOException {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+
+    @Override
+    public Document doc(int n, FieldSelector fieldSelector)
+        throws CorruptIndexException, IOException {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+    
+  }
+  
+  static final class JustCompileCollector extends Collector {
+
+    @Override
+    public void collect(int doc) throws IOException {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+
+    @Override
+    public void setNextReader(IndexReader reader, int docBase)
+        throws IOException {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+
+    @Override
+    public void setScorer(Scorer scorer) throws IOException {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+    
+    @Override
+    public boolean acceptsDocsOutOfOrder() {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+
+  }
+  
+  static final class JustCompileDocIdSet extends DocIdSet {
+
+    @Override
+    public DocIdSetIterator iterator() throws IOException {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+    
+  }
+
+  static final class JustCompileDocIdSetIterator extends DocIdSetIterator {
+
+    @Override
+    public int docID() {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+
+    @Override
+    public int nextDoc() throws IOException {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+    
+    @Override
+    public int advance(int target) throws IOException {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+  }
+  
+  static final class JustCompileExtendedFieldCacheLongParser implements FieldCache.LongParser {
+
+    public long parseLong(String string) {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+    
+  }
+  
+  static final class JustCompileExtendedFieldCacheDoubleParser implements FieldCache.DoubleParser {
+    
+    public double parseDouble(String string) {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+    
+  }
+
+  static final class JustCompileFieldComparator extends FieldComparator<Object> {
+
+    @Override
+    public int compare(int slot1, int slot2) {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+
+    @Override
+    public int compareBottom(int doc) throws IOException {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+
+    @Override
+    public void copy(int slot, int doc) throws IOException {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+
+    @Override
+    public void setBottom(int slot) {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+
+    @Override
+    public void setNextReader(IndexReader reader, int docBase)
+        throws IOException {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+
+    @Override
+    public Object value(int slot) {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+
+  }
+
+  static final class JustCompileFieldComparatorSource extends FieldComparatorSource {
+
+    @Override
+    public FieldComparator newComparator(String fieldname, int numHits,
+        int sortPos, boolean reversed) throws IOException {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+    
+  }
+
+  static final class JustCompileFilter extends Filter {
+    // Filter is just an abstract class with no abstract methods. However, it is
+    // still included here in case abstract methods are added in the future.
+    
+    @Override
+    public DocIdSet getDocIdSet(IndexReader reader) throws IOException {
+      return null;
+    }
+  }
+
+  static final class JustCompileFilteredDocIdSet extends FilteredDocIdSet {
+
+    public JustCompileFilteredDocIdSet(DocIdSet innerSet) {
+      super(innerSet);
+    }
+
+    @Override
+    protected boolean match(int docid) {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+    
+  }
+
+  static final class JustCompileFilteredDocIdSetIterator extends FilteredDocIdSetIterator {
+
+    public JustCompileFilteredDocIdSetIterator(DocIdSetIterator innerIter) {
+      super(innerIter);
+    }
+
+    @Override
+    protected boolean match(int doc) {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+    
+  }
+
+  static final class JustCompileFilteredTermEnum extends FilteredTermEnum {
+
+    @Override
+    public float difference() {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+
+    @Override
+    protected boolean endEnum() {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+
+    @Override
+    protected boolean termCompare(Term term) {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+    
+  }
+
+  static final class JustCompilePhraseScorer extends PhraseScorer {
+
+    JustCompilePhraseScorer(Weight weight, PhraseQuery.PostingsAndFreq[] postings,
+        Similarity similarity, byte[] norms) {
+      super(weight, postings, similarity, norms);
+    }
+
+    @Override
+    protected float phraseFreq() throws IOException {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+    
+  }
+
+  static final class JustCompileQuery extends Query {
+
+    @Override
+    public String toString(String field) {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+    
+  }
+  
+  static final class JustCompileScorer extends Scorer {
+
+    protected JustCompileScorer(Weight weight) {
+      super(weight);
+    }
+
+    @Override
+    protected boolean score(Collector collector, int max, int firstDocID)
+        throws IOException {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+    
+    @Override
+    public float score() throws IOException {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+
+    @Override
+    public int docID() {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+
+    @Override
+    public int nextDoc() throws IOException {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+    
+    @Override
+    public int advance(int target) throws IOException {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+  }
+  
+  static final class JustCompileSimilarity extends Similarity {
+
+    @Override
+    public float coord(int overlap, int maxOverlap) {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+
+    @Override
+    public float idf(int docFreq, int numDocs) {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+
+    @Override
+    public float computeNorm(String fieldName, FieldInvertState state) {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+
+    @Override
+    public float queryNorm(float sumOfSquaredWeights) {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+
+    @Override
+    public float sloppyFreq(int distance) {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+
+    @Override
+    public float tf(float freq) {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+    
+  }
+
+  static final class JustCompileSpanFilter extends SpanFilter {
+
+    @Override
+    public SpanFilterResult bitSpans(IndexReader reader) throws IOException {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+    
+    @Override
+    public DocIdSet getDocIdSet(IndexReader reader) throws IOException {
+      return null;
+    }    
+  }
+
+  static final class JustCompileTopDocsCollector extends TopDocsCollector<ScoreDoc> {
+
+    protected JustCompileTopDocsCollector(PriorityQueue<ScoreDoc> pq) {
+      super(pq);
+    }
+
+    @Override
+    public void collect(int doc) throws IOException {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+
+    @Override
+    public void setNextReader(IndexReader reader, int docBase)
+        throws IOException {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+
+    @Override
+    public void setScorer(Scorer scorer) throws IOException {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+    
+    @Override
+    public boolean acceptsDocsOutOfOrder() {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+
+    @Override
+    public TopDocs topDocs() {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+
+    @Override
+    public TopDocs topDocs(int start) {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+
+    @Override
+    public TopDocs topDocs(int start, int end) {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+    
+  }
+
+  static final class JustCompileWeight extends Weight {
+
+    @Override
+    public Explanation explain(IndexReader reader, int doc) throws IOException {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+
+    @Override
+    public Query getQuery() {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+
+    @Override
+    public float getValue() {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+
+    @Override
+    public void normalize(float norm) {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+
+    @Override
+    public float sumOfSquaredWeights() throws IOException {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+
+    @Override
+    public Scorer scorer(IndexReader reader, boolean scoreDocsInOrder, boolean topScorer)
+        throws IOException {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+    
+  }
+  
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/MockFilter.java b/lucene/backwards/src/test/org/apache/lucene/search/MockFilter.java
new file mode 100644
index 0000000..36b4247
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/MockFilter.java
@@ -0,0 +1,40 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.util.DocIdBitSet;
+import java.util.BitSet;
+
+public class MockFilter extends Filter {
+  private boolean wasCalled;
+
+  @Override
+  public DocIdSet getDocIdSet(IndexReader reader) {
+    wasCalled = true;
+    return new DocIdBitSet(new BitSet());
+  }
+
+  public void clear() {
+    wasCalled = false;
+  }
+
+  public boolean wasCalled() {
+    return wasCalled;
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/MultiCollectorTest.java b/lucene/backwards/src/test/org/apache/lucene/search/MultiCollectorTest.java
new file mode 100644
index 0000000..ae988c0
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/MultiCollectorTest.java
@@ -0,0 +1,110 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.search.Collector;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.util.LuceneTestCase;
+import org.junit.Test;
+
+public class MultiCollectorTest extends LuceneTestCase {
+
+  private static class DummyCollector extends Collector {
+
+    boolean acceptsDocsOutOfOrderCalled = false;
+    boolean collectCalled = false;
+    boolean setNextReaderCalled = false;
+    boolean setScorerCalled = false;
+
+    @Override
+    public boolean acceptsDocsOutOfOrder() {
+      acceptsDocsOutOfOrderCalled = true;
+      return true;
+    }
+
+    @Override
+    public void collect(int doc) throws IOException {
+      collectCalled = true;
+    }
+
+    @Override
+    public void setNextReader(IndexReader reader, int docBase) throws IOException {
+      setNextReaderCalled = true;
+    }
+
+    @Override
+    public void setScorer(Scorer scorer) throws IOException {
+      setScorerCalled = true;
+    }
+
+  }
+
+  @Test
+  public void testNullCollectors() throws Exception {
+    // Tests that wrapping only null collectors is rejected.
+    try {
+      MultiCollector.wrap(null, null);
+      fail("wrapping only null collectors should not be supported");
+    } catch (IllegalArgumentException e) {
+      // expected
+    }
+
+    // Tests that the collector handles some null collectors well. If it
+    // doesn't, an NPE would be thrown.
+    Collector c = MultiCollector.wrap(new DummyCollector(), null, new DummyCollector());
+    assertTrue(c instanceof MultiCollector);
+    assertTrue(c.acceptsDocsOutOfOrder());
+    c.collect(1);
+    c.setNextReader(null, 0);
+    c.setScorer(null);
+  }
+
+  @Test
+  public void testSingleCollector() throws Exception {
+    // Tests that if a single Collector is input, it is returned (and not MultiCollector).
+    DummyCollector dc = new DummyCollector();
+    assertSame(dc, MultiCollector.wrap(dc));
+    assertSame(dc, MultiCollector.wrap(dc, null));
+  }
+  
+  @Test
+  public void testCollector() throws Exception {
+    // Tests that the collector delegates calls to input collectors properly.
+
+    // Tests that the collector handles some null collectors well. If it
+    // doesn't, an NPE would be thrown.
+    DummyCollector[] dcs = new DummyCollector[] { new DummyCollector(), new DummyCollector() };
+    Collector c = MultiCollector.wrap(dcs);
+    assertTrue(c.acceptsDocsOutOfOrder());
+    c.collect(1);
+    c.setNextReader(null, 0);
+    c.setScorer(null);
+
+    for (DummyCollector dc : dcs) {
+      assertTrue(dc.acceptsDocsOutOfOrderCalled);
+      assertTrue(dc.collectCalled);
+      assertTrue(dc.setNextReaderCalled);
+      assertTrue(dc.setScorerCalled);
+    }
+
+  }
+
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/SingleDocTestFilter.java b/lucene/backwards/src/test/org/apache/lucene/search/SingleDocTestFilter.java
new file mode 100644
index 0000000..bd1df4e
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/SingleDocTestFilter.java
@@ -0,0 +1,39 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.util.DocIdBitSet;
+
+import java.util.BitSet;
+import java.io.IOException;
+
+public class SingleDocTestFilter extends Filter {
+  private int doc;
+
+  public SingleDocTestFilter(int doc) {
+    this.doc = doc;
+  }
+
+  @Override
+  public DocIdSet getDocIdSet(IndexReader reader) throws IOException {
+    BitSet bits = new BitSet(reader.maxDoc());
+    bits.set(doc);
+    return new DocIdBitSet(bits);
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/TestBoolean2.java b/lucene/backwards/src/test/org/apache/lucene/search/TestBoolean2.java
new file mode 100644
index 0000000..a16818f
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/TestBoolean2.java
@@ -0,0 +1,324 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+import java.util.Random;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.WhitespaceAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.queryParser.ParseException;
+import org.apache.lucene.queryParser.QueryParser;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.MockDirectoryWrapper;
+import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util._TestUtil;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/** Test BooleanQuery2 against BooleanQuery by overriding the standard query parser.
+ * This also tests the scoring order of BooleanQuery.
+ */
+public class TestBoolean2 extends LuceneTestCase {
+  private static IndexSearcher searcher;
+  private static IndexSearcher bigSearcher;
+  private static IndexReader reader;
+  private static int NUM_EXTRA_DOCS = 6000;
+
+  public static final String field = "field";
+  private static Directory directory;
+  private static Directory dir2;
+  private static int mulFactor;
+
+  @BeforeClass
+  public static void beforeClass() throws Exception {
+    directory = newDirectory();
+    RandomIndexWriter writer= new RandomIndexWriter(random, directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
+    for (int i = 0; i < docFields.length; i++) {
+      Document doc = new Document();
+      doc.add(newField(field, docFields[i], Field.Store.NO, Field.Index.ANALYZED));
+      writer.addDocument(doc);
+    }
+    writer.close();
+    searcher = new IndexSearcher(directory, true);
+
+    // Make big index
+    dir2 = new MockDirectoryWrapper(random, new RAMDirectory(directory));
+
+    // First multiply small test index:
+    mulFactor = 1;
+    int docCount = 0;
+    do {
+      final Directory copy = new MockDirectoryWrapper(random, new RAMDirectory(dir2));
+      RandomIndexWriter w = new RandomIndexWriter(random, dir2);
+      w.addIndexes(new Directory[] {copy});
+      docCount = w.maxDoc();
+      w.close();
+      mulFactor *= 2;
+    } while(docCount < 3000);
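+    // (editorial note, not in the original test: dir2 now contains mulFactor
+    //  concatenated copies of the small index, which is why bigSearcher hit
+    //  counts are later compared against mulFactor * collector.totalHits)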
+
+    RandomIndexWriter w = new RandomIndexWriter(random, dir2, 
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
+        .setMaxBufferedDocs(_TestUtil.nextInt(random, 50, 1000)));
+    Document doc = new Document();
+    doc.add(newField("field2", "xxx", Field.Store.NO, Field.Index.ANALYZED));
+    for(int i=0;i<NUM_EXTRA_DOCS/2;i++) {
+      w.addDocument(doc);
+    }
+    doc = new Document();
+    doc.add(newField("field2", "big bad bug", Field.Store.NO, Field.Index.ANALYZED));
+    for(int i=0;i<NUM_EXTRA_DOCS/2;i++) {
+      w.addDocument(doc);
+    }
+    reader = w.getReader();
+    bigSearcher = newSearcher(reader);
+    w.close();
+  }
+
+  @AfterClass
+  public static void afterClass() throws Exception {
+    searcher.close();
+    reader.close();
+    dir2.close();
+    directory.close();
+    bigSearcher.close();
+    searcher = null;
+    reader = null;
+    dir2 = null;
+    directory = null;
+    bigSearcher = null;
+  }
+
+  private static String[] docFields = {
+    "w1 w2 w3 w4 w5",
+    "w1 w3 w2 w3",
+    "w1 xx w2 yy w3",
+    "w1 w3 xx w2 yy w3"
+  };
+
+  public Query makeQuery(String queryText) throws ParseException {
+    Query q = (new QueryParser(TEST_VERSION_CURRENT, field, new MockAnalyzer(random))).parse(queryText);
+    return q;
+  }
+
+  public void queriesTest(String queryText, int[] expDocNrs) throws Exception {
+//System.out.println();
+//System.out.println("Query: " + queryText);
+
+    Query query = makeQuery(queryText);
+    TopScoreDocCollector collector = TopScoreDocCollector.create(1000, false);
+    searcher.search(query, null, collector);
+    ScoreDoc[] hits1 = collector.topDocs().scoreDocs;
+
+    collector = TopScoreDocCollector.create(1000, true);
+    searcher.search(query, null, collector);
+    ScoreDoc[] hits2 = collector.topDocs().scoreDocs; 
+
+    assertEquals(mulFactor * collector.totalHits,
+                 bigSearcher.search(query, 1).totalHits);
+      
+    CheckHits.checkHitsQuery(query, hits1, hits2, expDocNrs);
+  }
+
+  @Test
+  public void testQueries01() throws Exception {
+    String queryText = "+w3 +xx";
+    int[] expDocNrs = {2,3};
+    queriesTest(queryText, expDocNrs);
+  }
+
+  @Test
+  public void testQueries02() throws Exception {
+    String queryText = "+w3 xx";
+    int[] expDocNrs = {2,3,1,0};
+    queriesTest(queryText, expDocNrs);
+  }
+
+  @Test
+  public void testQueries03() throws Exception {
+    String queryText = "w3 xx";
+    int[] expDocNrs = {2,3,1,0};
+    queriesTest(queryText, expDocNrs);
+  }
+
+  @Test
+  public void testQueries04() throws Exception {
+    String queryText = "w3 -xx";
+    int[] expDocNrs = {1,0};
+    queriesTest(queryText, expDocNrs);
+  }
+
+  @Test
+  public void testQueries05() throws Exception {
+    String queryText = "+w3 -xx";
+    int[] expDocNrs = {1,0};
+    queriesTest(queryText, expDocNrs);
+  }
+
+  @Test
+  public void testQueries06() throws Exception {
+    String queryText = "+w3 -xx -w5";
+    int[] expDocNrs = {1};
+    queriesTest(queryText, expDocNrs);
+  }
+
+  @Test
+  public void testQueries07() throws Exception {
+    String queryText = "-w3 -xx -w5";
+    int[] expDocNrs = {};
+    queriesTest(queryText, expDocNrs);
+  }
+
+  @Test
+  public void testQueries08() throws Exception {
+    String queryText = "+w3 xx -w5";
+    int[] expDocNrs = {2,3,1};
+    queriesTest(queryText, expDocNrs);
+  }
+
+  @Test
+  public void testQueries09() throws Exception {
+    String queryText = "+w3 +xx +w2 zz";
+    int[] expDocNrs = {2, 3};
+    queriesTest(queryText, expDocNrs);
+  }
+
+  @Test
+  public void testQueries10() throws Exception {
+    String queryText = "+w3 +xx +w2 zz";
+    int[] expDocNrs = {2, 3};
+    Similarity oldSimilarity = searcher.getSimilarity();
+    try {
+      searcher.setSimilarity(new DefaultSimilarity(){
+        @Override
+        public float coord(int overlap, int maxOverlap) {
+          return overlap / ((float)maxOverlap - 1);
+        }
+      });
+      queriesTest(queryText, expDocNrs);
+    } finally {
+      searcher.setSimilarity(oldSimilarity);
+    }
+  }
+
+  @Test
+  public void testRandomQueries() throws Exception {
+    String[] vals = {"w1","w2","w3","w4","w5","xx","yy","zzz"};
+
+    int tot=0;
+
+    BooleanQuery q1 = null;
+    try {
+
+      // increase number of iterations for more complete testing
+      int num = atLeast(10);
+      for (int i=0; i<num; i++) {
+        int level = random.nextInt(3);
+        q1 = randBoolQuery(new Random(random.nextLong()), random.nextBoolean(), level, field, vals, null);
+        
+        // Can't sort by relevance since floating point numbers may not quite
+        // match up.
+        Sort sort = Sort.INDEXORDER;
+
+        QueryUtils.check(random, q1,searcher);
+
+        TopFieldCollector collector = TopFieldCollector.create(sort, 1000,
+            false, true, true, true);
+
+        searcher.search(q1, null, collector);
+        ScoreDoc[] hits1 = collector.topDocs().scoreDocs;
+
+        collector = TopFieldCollector.create(sort, 1000,
+            false, true, true, false);
+        
+        searcher.search(q1, null, collector);
+        ScoreDoc[] hits2 = collector.topDocs().scoreDocs;
+        tot+=hits2.length;
+        CheckHits.checkEqual(q1, hits1, hits2);
+
+        BooleanQuery q3 = new BooleanQuery();
+        q3.add(q1, BooleanClause.Occur.SHOULD);
+        q3.add(new PrefixQuery(new Term("field2", "b")), BooleanClause.Occur.SHOULD);
+        TopDocs hits4 = bigSearcher.search(q3, 1);
+        assertEquals(mulFactor*collector.totalHits + NUM_EXTRA_DOCS/2, hits4.totalHits);
+      }
+
+    } catch (Exception e) {
+      // For easier debugging
+      System.out.println("failed query: " + q1);
+      throw e;
+    }
+
+    // System.out.println("Total hits:"+tot);
+  }
+
+
+  // used to set properties or change every BooleanQuery
+  // generated from randBoolQuery.
+  public static interface Callback {
+    public void postCreate(BooleanQuery q);
+  }
+
+  // Random rnd is passed in so that the exact same random query may be created
+  // more than once.
+  public static BooleanQuery randBoolQuery(Random rnd, boolean allowMust, int level, String field, String[] vals, Callback cb) {
+    BooleanQuery current = new BooleanQuery(rnd.nextInt()<0);
+    for (int i=0; i<rnd.nextInt(vals.length)+1; i++) {
+      int qType=0; // term query
+      if (level>0) {
+        qType = rnd.nextInt(10);
+      }
+      Query q;
+      if (qType < 3) {
+        q = new TermQuery(new Term(field, vals[rnd.nextInt(vals.length)]));
+      } else if (qType < 7) {
+        q = new WildcardQuery(new Term(field, "w*"));
+      } else {
+        q = randBoolQuery(rnd, allowMust, level-1, field, vals, cb);
+      }
+
+      int r = rnd.nextInt(10);
+      BooleanClause.Occur occur;
+      if (r<2) {
+        occur=BooleanClause.Occur.MUST_NOT;
+      }
+      else if (r<5) {
+        if (allowMust) {
+          occur=BooleanClause.Occur.MUST;
+        } else {
+          occur=BooleanClause.Occur.SHOULD;
+        }
+      } else {
+        occur=BooleanClause.Occur.SHOULD;
+      }
+
+      current.add(q, occur);
+    }
+    if (cb!=null) cb.postCreate(current);
+    return current;
+  }
+
+
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/TestBooleanMinShouldMatch.java b/lucene/backwards/src/test/org/apache/lucene/search/TestBooleanMinShouldMatch.java
new file mode 100644
index 0000000..0bf05ad
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/TestBooleanMinShouldMatch.java
@@ -0,0 +1,390 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.store.Directory;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+
+import java.text.DecimalFormat;
+import java.util.Random;
+
+/** Test that BooleanQuery.setMinimumNumberShouldMatch works.
+ */
+public class TestBooleanMinShouldMatch extends LuceneTestCase {
+
+    private static Directory index;
+    private static IndexReader r;
+    private static IndexSearcher s;
+
+    @BeforeClass
+    public static void beforeClass() throws Exception {
+        String[] data = new String [] {
+            "A 1 2 3 4 5 6",
+            "Z       4 5 6",
+            null,
+            "B   2   4 5 6",
+            "Y     3   5 6",
+            null,
+            "C     3     6",
+            "X       4 5 6"
+        };
+
+        index = newDirectory();
+        RandomIndexWriter w = new RandomIndexWriter(random, index);
+
+        for (int i = 0; i < data.length; i++) {
+            Document doc = new Document();
+            doc.add(newField("id", String.valueOf(i), Field.Store.YES, Field.Index.NOT_ANALYZED));//Field.Keyword("id",String.valueOf(i)));
+            doc.add(newField("all", "all", Field.Store.YES, Field.Index.NOT_ANALYZED));//Field.Keyword("all","all"));
+            if (null != data[i]) {
+                doc.add(newField("data", data[i], Field.Store.YES, Field.Index.ANALYZED));//Field.Text("data",data[i]));
+            }
+            w.addDocument(doc);
+        }
+
+        r = w.getReader();
+        s = newSearcher(r);
+        w.close();
+//System.out.println("Set up " + getName());
+    }
+    
+    @AfterClass
+    public static void afterClass() throws Exception {
+      s.close();
+      s = null;
+      r.close();
+      r = null;
+      index.close();
+      index = null;
+    }
+
+
+    public void verifyNrHits(Query q, int expected) throws Exception {
+        ScoreDoc[] h = s.search(q, null, 1000).scoreDocs;
+        if (expected != h.length) {
+            printHits(getName(), h, s);
+        }
+        assertEquals("result count", expected, h.length);
+        QueryUtils.check(random, q,s);
+    }
+
+    public void testAllOptional() throws Exception {
+
+        BooleanQuery q = new BooleanQuery();
+        for (int i = 1; i <=4; i++) {
+            q.add(new TermQuery(new Term("data",""+i)), BooleanClause.Occur.SHOULD);//false, false);
+        }
+        q.setMinimumNumberShouldMatch(2); // match at least two of 4
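+        // (editorial note, not in the original test: with the data above, only
+        //  "A 1 2 3 4 5 6" and "B   2   4 5 6" contain at least two of the
+        //  terms 1-4, hence the expected hit count of 2 below)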
+        verifyNrHits(q, 2);
+    }
+
+    public void testOneReqAndSomeOptional() throws Exception {
+
+        /* one required, some optional */
+        BooleanQuery q = new BooleanQuery();
+        q.add(new TermQuery(new Term("all", "all" )), BooleanClause.Occur.MUST);//true,  false);
+        q.add(new TermQuery(new Term("data", "5"  )), BooleanClause.Occur.SHOULD);//false, false);
+        q.add(new TermQuery(new Term("data", "4"  )), BooleanClause.Occur.SHOULD);//false, false);
+        q.add(new TermQuery(new Term("data", "3"  )), BooleanClause.Occur.SHOULD);//false, false);
+
+        q.setMinimumNumberShouldMatch(2); // 2 of 3 optional 
+
+        verifyNrHits(q, 5);
+    }
+
+    public void testSomeReqAndSomeOptional() throws Exception {
+
+        /* two required, some optional */
+        BooleanQuery q = new BooleanQuery();
+        q.add(new TermQuery(new Term("all", "all" )), BooleanClause.Occur.MUST);//true,  false);
+        q.add(new TermQuery(new Term("data", "6"  )), BooleanClause.Occur.MUST);//true,  false);
+        q.add(new TermQuery(new Term("data", "5"  )), BooleanClause.Occur.SHOULD);//false, false);
+        q.add(new TermQuery(new Term("data", "4"  )), BooleanClause.Occur.SHOULD);//false, false);
+        q.add(new TermQuery(new Term("data", "3"  )), BooleanClause.Occur.SHOULD);//false, false);
+
+        q.setMinimumNumberShouldMatch(2); // 2 of 3 optional 
+
+        verifyNrHits(q, 5);
+    }
+
+    public void testOneProhibAndSomeOptional() throws Exception {
+
+        /* one prohibited, some optional */
+        BooleanQuery q = new BooleanQuery();
+        q.add(new TermQuery(new Term("data", "1"  )), BooleanClause.Occur.SHOULD);//false, false);
+        q.add(new TermQuery(new Term("data", "2"  )), BooleanClause.Occur.SHOULD);//false, false);
+        q.add(new TermQuery(new Term("data", "3"  )), BooleanClause.Occur.MUST_NOT);//false, true );
+        q.add(new TermQuery(new Term("data", "4"  )), BooleanClause.Occur.SHOULD);//false, false);
+
+        q.setMinimumNumberShouldMatch(2); // 2 of 3 optional 
+
+        verifyNrHits(q, 1);
+    }
+
+    public void testSomeProhibAndSomeOptional() throws Exception {
+
+        /* two prohibited, some optional */
+        BooleanQuery q = new BooleanQuery();
+        q.add(new TermQuery(new Term("data", "1"  )), BooleanClause.Occur.SHOULD);//false, false);
+        q.add(new TermQuery(new Term("data", "2"  )), BooleanClause.Occur.SHOULD);//false, false);
+        q.add(new TermQuery(new Term("data", "3"  )), BooleanClause.Occur.MUST_NOT);//false, true );
+        q.add(new TermQuery(new Term("data", "4"  )), BooleanClause.Occur.SHOULD);//false, false);
+        q.add(new TermQuery(new Term("data", "C"  )), BooleanClause.Occur.MUST_NOT);//false, true );
+
+        q.setMinimumNumberShouldMatch(2); // 2 of 3 optional 
+
+        verifyNrHits(q, 1);
+    }
+
+    public void testOneReqOneProhibAndSomeOptional() throws Exception {
+
+        /* one required, one prohibited, some optional */
+        BooleanQuery q = new BooleanQuery();
+        q.add(new TermQuery(new Term("data", "6"  )), BooleanClause.Occur.MUST);// true,  false);
+        q.add(new TermQuery(new Term("data", "5"  )), BooleanClause.Occur.SHOULD);//false, false);
+        q.add(new TermQuery(new Term("data", "4"  )), BooleanClause.Occur.SHOULD);//false, false);
+        q.add(new TermQuery(new Term("data", "3"  )), BooleanClause.Occur.MUST_NOT);//false, true );
+        q.add(new TermQuery(new Term("data", "2"  )), BooleanClause.Occur.SHOULD);//false, false);
+        q.add(new TermQuery(new Term("data", "1"  )), BooleanClause.Occur.SHOULD);//false, false);
+
+        q.setMinimumNumberShouldMatch(3); // 3 of 4 optional 
+
+        verifyNrHits(q, 1);
+    }
+
+    public void testSomeReqOneProhibAndSomeOptional() throws Exception {
+
+        /* two required, one prohibited, some optional */
+        BooleanQuery q = new BooleanQuery();
+        q.add(new TermQuery(new Term("all",  "all")), BooleanClause.Occur.MUST);//true,  false);
+        q.add(new TermQuery(new Term("data", "6"  )), BooleanClause.Occur.MUST);//true,  false);
+        q.add(new TermQuery(new Term("data", "5"  )), BooleanClause.Occur.SHOULD);//false, false);
+        q.add(new TermQuery(new Term("data", "4"  )), BooleanClause.Occur.SHOULD);//false, false);
+        q.add(new TermQuery(new Term("data", "3"  )), BooleanClause.Occur.MUST_NOT);//false, true );
+        q.add(new TermQuery(new Term("data", "2"  )), BooleanClause.Occur.SHOULD);//false, false);
+        q.add(new TermQuery(new Term("data", "1"  )), BooleanClause.Occur.SHOULD);//false, false);
+
+        q.setMinimumNumberShouldMatch(3); // 3 of 4 optional 
+
+        verifyNrHits(q, 1);
+    }
+
+    public void testOneReqSomeProhibAndSomeOptional() throws Exception {
+
+        /* one required, two prohibited, some optional */
+        BooleanQuery q = new BooleanQuery();
+        q.add(new TermQuery(new Term("data", "6"  )), BooleanClause.Occur.MUST);//true,  false);
+        q.add(new TermQuery(new Term("data", "5"  )), BooleanClause.Occur.SHOULD);//false, false);
+        q.add(new TermQuery(new Term("data", "4"  )), BooleanClause.Occur.SHOULD);//false, false);
+        q.add(new TermQuery(new Term("data", "3"  )), BooleanClause.Occur.MUST_NOT);//false, true );
+        q.add(new TermQuery(new Term("data", "2"  )), BooleanClause.Occur.SHOULD);//false, false);
+        q.add(new TermQuery(new Term("data", "1"  )), BooleanClause.Occur.SHOULD);//false, false);
+        q.add(new TermQuery(new Term("data", "C"  )), BooleanClause.Occur.MUST_NOT);//false, true );
+
+        q.setMinimumNumberShouldMatch(3); // 3 of 4 optional 
+
+        verifyNrHits(q, 1);
+    }
+
+    public void testSomeReqSomeProhibAndSomeOptional() throws Exception {
+
+        /* two required, two prohibited, some optional */
+        BooleanQuery q = new BooleanQuery();
+        q.add(new TermQuery(new Term("all",  "all")), BooleanClause.Occur.MUST);//true,  false);
+        q.add(new TermQuery(new Term("data", "6"  )), BooleanClause.Occur.MUST);//true,  false);
+        q.add(new TermQuery(new Term("data", "5"  )), BooleanClause.Occur.SHOULD);//false, false);
+        q.add(new TermQuery(new Term("data", "4"  )), BooleanClause.Occur.SHOULD);//false, false);
+        q.add(new TermQuery(new Term("data", "3"  )), BooleanClause.Occur.MUST_NOT);//false, true );
+        q.add(new TermQuery(new Term("data", "2"  )), BooleanClause.Occur.SHOULD);//false, false);
+        q.add(new TermQuery(new Term("data", "1"  )), BooleanClause.Occur.SHOULD);//false, false);
+        q.add(new TermQuery(new Term("data", "C"  )), BooleanClause.Occur.MUST_NOT);//false, true );
+
+        q.setMinimumNumberShouldMatch(3); // 3 of 4 optional 
+
+        verifyNrHits(q, 1);
+    }
+
+    public void testMinHigherThenNumOptional() throws Exception {
+
+        /* two required, two prohibited, some optional */
+        BooleanQuery q = new BooleanQuery();
+        q.add(new TermQuery(new Term("all",  "all")), BooleanClause.Occur.MUST);//true,  false);
+        q.add(new TermQuery(new Term("data", "6"  )), BooleanClause.Occur.MUST);//true,  false);
+        q.add(new TermQuery(new Term("data", "5"  )), BooleanClause.Occur.SHOULD);//false, false);
+        q.add(new TermQuery(new Term("data", "4"  )), BooleanClause.Occur.SHOULD);//false, false);
+        q.add(new TermQuery(new Term("data", "3"  )), BooleanClause.Occur.MUST_NOT);//false, true );
+        q.add(new TermQuery(new Term("data", "2"  )), BooleanClause.Occur.SHOULD);//false, false);
+        q.add(new TermQuery(new Term("data", "1"  )), BooleanClause.Occur.SHOULD);//false, false);
+        q.add(new TermQuery(new Term("data", "C"  )), BooleanClause.Occur.MUST_NOT);//false, true );
+
+        q.setMinimumNumberShouldMatch(90); // 90 of 4 optional ?!?!?!
+
+        verifyNrHits(q, 0);
+    }
+
+    public void testMinEqualToNumOptional() throws Exception {
+
+        /* two required, two optional */
+        BooleanQuery q = new BooleanQuery();
+        q.add(new TermQuery(new Term("all", "all" )), BooleanClause.Occur.SHOULD);//false, false);
+        q.add(new TermQuery(new Term("data", "6"  )), BooleanClause.Occur.MUST);//true,  false);
+        q.add(new TermQuery(new Term("data", "3"  )), BooleanClause.Occur.MUST);//true,  false);
+        q.add(new TermQuery(new Term("data", "2"  )), BooleanClause.Occur.SHOULD);//false, false);
+
+        q.setMinimumNumberShouldMatch(2); // 2 of 2 optional 
+
+        verifyNrHits(q, 1);
+    }
+
+    public void testOneOptionalEqualToMin() throws Exception {
+
+        /* two required, one optional */
+        BooleanQuery q = new BooleanQuery();
+        q.add(new TermQuery(new Term("all", "all" )), BooleanClause.Occur.MUST);//true,  false);
+        q.add(new TermQuery(new Term("data", "3"  )), BooleanClause.Occur.SHOULD);//false, false);
+        q.add(new TermQuery(new Term("data", "2"  )), BooleanClause.Occur.MUST);//true,  false);
+
+        q.setMinimumNumberShouldMatch(1); // 1 of 1 optional 
+
+        verifyNrHits(q, 1);
+    }
+
+    public void testNoOptionalButMin() throws Exception {
+
+        /* two required, no optional */
+        BooleanQuery q = new BooleanQuery();
+        q.add(new TermQuery(new Term("all", "all" )), BooleanClause.Occur.MUST);//true,  false);
+        q.add(new TermQuery(new Term("data", "2"  )), BooleanClause.Occur.MUST);//true,  false);
+
+        q.setMinimumNumberShouldMatch(1); // 1 of 0 optional 
+
+        verifyNrHits(q, 0);
+    }
+
+    public void testNoOptionalButMin2() throws Exception {
+
+        /* one required, no optional */
+        BooleanQuery q = new BooleanQuery();
+        q.add(new TermQuery(new Term("all", "all" )), BooleanClause.Occur.MUST);//true,  false);
+
+        q.setMinimumNumberShouldMatch(1); // 1 of 0 optional 
+
+        verifyNrHits(q, 0);
+    }
+
+    public void testRandomQueries() throws Exception {
+      String field="data";
+      String[] vals = {"1","2","3","4","5","6","A","Z","B","Y","Z","X","foo"};
+      int maxLev=4;
+
+      // callback object to set a random setMinimumNumberShouldMatch
+      TestBoolean2.Callback minNrCB = new TestBoolean2.Callback() {
+        public void postCreate(BooleanQuery q) {
+          BooleanClause[] c =q.getClauses();
+          int opt=0;
+          for (int i=0; i<c.length;i++) {
+            if (c[i].getOccur() == BooleanClause.Occur.SHOULD) opt++;
+          }
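+          // nextInt(opt+2) picks a value in [0, opt+1], so the constraint ranges
+          // from a no-op up to one more than the number of optional clauses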
+          q.setMinimumNumberShouldMatch(random.nextInt(opt+2));
+        }
+      };
+
+
+
+      // increase number of iterations for more complete testing      
+      int num = atLeast(10);
+      for (int i = 0; i < num; i++) {
+        int lev = random.nextInt(maxLev);
+        final long seed = random.nextLong();
+        BooleanQuery q1 = TestBoolean2.randBoolQuery(new Random(seed), true, lev, field, vals, null);
+        // BooleanQuery q2 = TestBoolean2.randBoolQuery(new Random(seed), lev, field, vals, minNrCB);
+        BooleanQuery q2 = TestBoolean2.randBoolQuery(new Random(seed), true, lev, field, vals, null);
+        // only set minimumNumberShouldMatch on the top level query since setting
+        // at a lower level can change the score.
+        minNrCB.postCreate(q2);
+
+        // Can't use Hits because normalized scores will mess things
+        // up.  The non-sorting version of search() that returns TopDocs
+        // will not normalize scores.
+        TopDocs top1 = s.search(q1,null,100);
+        TopDocs top2 = s.search(q2,null,100);
+        if (i < 100) {
+          QueryUtils.check(random, q1,s);
+          QueryUtils.check(random, q2,s);
+        }
+        // The constrained query's results
+        // should be a subset of the unconstrained query's results.
+        if (top2.totalHits > top1.totalHits) {
+          fail("Constrained results not a subset:\n"
+                        + CheckHits.topdocsString(top1,0,0)
+                        + CheckHits.topdocsString(top2,0,0)
+                        + "for query:" + q2.toString());
+        }
+
+        for (int hit=0; hit<top2.totalHits; hit++) {
+          int id = top2.scoreDocs[hit].doc;
+          float score = top2.scoreDocs[hit].score;
+          boolean found=false;
+          // find this doc in other hits
+          for (int other=0; other<top1.totalHits; other++) {
+            if (top1.scoreDocs[other].doc == id) {
+              found=true;
+              float otherScore = top1.scoreDocs[other].score;
+              // check if scores match
+              if (Math.abs(otherScore-score)>1.0e-6f) {
+                fail("Doc " + id + " scores don't match\n"
+                     + CheckHits.topdocsString(top1,0,0)
+                     + CheckHits.topdocsString(top2,0,0)
+                     + "for query:" + q2.toString());
+              }
+            }
+          }
+
+          // check if subset
+          if (!found) fail("Doc " + id + " not found\n"
+                + CheckHits.topdocsString(top1,0,0)
+                + CheckHits.topdocsString(top2,0,0)
+                + "for query:" + q2.toString());
+        }
+      }
+      // System.out.println("Total hits:"+tot);
+    }
+
+
+
+    protected void printHits(String test, ScoreDoc[] h, Searcher searcher) throws Exception {
+
+        System.err.println("------- " + test + " -------");
+
+        DecimalFormat f = new DecimalFormat("0.000000");
+
+        for (int i = 0; i < h.length; i++) {
+            Document d = searcher.doc(h[i].doc);
+            float score = h[i].score;
+            System.err.println("#" + i + ": " + f.format(score) + " - " +
+                               d.get("id") + " - " + d.get("data"));
+        }
+    }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/TestBooleanOr.java b/lucene/backwards/src/test/org/apache/lucene/search/TestBooleanOr.java
new file mode 100644
index 0000000..169cae1
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/TestBooleanOr.java
@@ -0,0 +1,168 @@
+package org.apache.lucene.search;
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import java.io.IOException;
+
+import org.apache.lucene.util.LuceneTestCase;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.store.Directory;
+
+public class TestBooleanOr extends LuceneTestCase {
+
+  private static String FIELD_T = "T";
+  private static String FIELD_C = "C";
+
+  private TermQuery t1 = new TermQuery(new Term(FIELD_T, "files"));
+  private TermQuery t2 = new TermQuery(new Term(FIELD_T, "deleting"));
+  private TermQuery c1 = new TermQuery(new Term(FIELD_C, "production"));
+  private TermQuery c2 = new TermQuery(new Term(FIELD_C, "optimize"));
+
+  private IndexSearcher searcher = null;
+  private Directory dir;
+  private IndexReader reader;
+  
+
+  private int search(Query q) throws IOException {
+    QueryUtils.check(random, q,searcher);
+    return searcher.search(q, null, 1000).totalHits;
+  }
+
+  public void testElements() throws IOException {
+    assertEquals(1, search(t1));
+    assertEquals(1, search(t2));
+    assertEquals(1, search(c1));
+    assertEquals(1, search(c2));
+  }
+
+  /**
+   * <code>T:files T:deleting C:production C:optimize </code>
+   * it works.
+   *
+   * @throws IOException
+   */
+  public void testFlat() throws IOException {
+    BooleanQuery q = new BooleanQuery();
+    q.add(new BooleanClause(t1, BooleanClause.Occur.SHOULD));
+    q.add(new BooleanClause(t2, BooleanClause.Occur.SHOULD));
+    q.add(new BooleanClause(c1, BooleanClause.Occur.SHOULD));
+    q.add(new BooleanClause(c2, BooleanClause.Occur.SHOULD));
+    assertEquals(1, search(q));
+  }
+
+  /**
+   * <code>(T:files T:deleting) (+C:production +C:optimize)</code>
+   * it works.
+   *
+   * @throws IOException
+   */
+  public void testParenthesisMust() throws IOException {
+    BooleanQuery q3 = new BooleanQuery();
+    q3.add(new BooleanClause(t1, BooleanClause.Occur.SHOULD));
+    q3.add(new BooleanClause(t2, BooleanClause.Occur.SHOULD));
+    BooleanQuery q4 = new BooleanQuery();
+    q4.add(new BooleanClause(c1, BooleanClause.Occur.MUST));
+    q4.add(new BooleanClause(c2, BooleanClause.Occur.MUST));
+    BooleanQuery q2 = new BooleanQuery();
+    q2.add(q3, BooleanClause.Occur.SHOULD);
+    q2.add(q4, BooleanClause.Occur.SHOULD);
+    assertEquals(1, search(q2));
+  }
+
+  /**
+   * <code>(T:files T:deleting) +(C:production C:optimize)</code>
+   * reported as not working (no hits); the test expects one hit.
+   *
+   * @throws IOException
+   */
+  public void testParenthesisMust2() throws IOException {
+    BooleanQuery q3 = new BooleanQuery();
+    q3.add(new BooleanClause(t1, BooleanClause.Occur.SHOULD));
+    q3.add(new BooleanClause(t2, BooleanClause.Occur.SHOULD));
+    BooleanQuery q4 = new BooleanQuery();
+    q4.add(new BooleanClause(c1, BooleanClause.Occur.SHOULD));
+    q4.add(new BooleanClause(c2, BooleanClause.Occur.SHOULD));
+    BooleanQuery q2 = new BooleanQuery();
+    q2.add(q3, BooleanClause.Occur.SHOULD);
+    q2.add(q4, BooleanClause.Occur.MUST);
+    assertEquals(1, search(q2));
+  }
+
+  /**
+   * <code>(T:files T:deleting) (C:production C:optimize)</code>
+   * reported as not working (no hits); the test expects one hit.
+   *
+   * @throws IOException
+   */
+  public void testParenthesisShould() throws IOException {
+    BooleanQuery q3 = new BooleanQuery();
+    q3.add(new BooleanClause(t1, BooleanClause.Occur.SHOULD));
+    q3.add(new BooleanClause(t2, BooleanClause.Occur.SHOULD));
+    BooleanQuery q4 = new BooleanQuery();
+    q4.add(new BooleanClause(c1, BooleanClause.Occur.SHOULD));
+    q4.add(new BooleanClause(c2, BooleanClause.Occur.SHOULD));
+    BooleanQuery q2 = new BooleanQuery();
+    q2.add(q3, BooleanClause.Occur.SHOULD);
+    q2.add(q4, BooleanClause.Occur.SHOULD);
+    assertEquals(1, search(q2));
+  }
+
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+
+    //
+    dir = newDirectory();
+
+
+    //
+    RandomIndexWriter writer = new RandomIndexWriter(random, dir);
+
+    //
+    Document d = new Document();
+    d.add(newField(
+        FIELD_T,
+        "Optimize not deleting all files",
+        Field.Store.YES,
+        Field.Index.ANALYZED));
+    d.add(newField(
+        FIELD_C,
+        "Deleted When I run an optimize in our production environment.",
+        Field.Store.YES,
+        Field.Index.ANALYZED));
+
+    //
+    writer.addDocument(d);
+
+    reader = writer.getReader();
+    //
+    searcher = newSearcher(reader);
+    writer.close();
+  }
+
+  @Override
+  public void tearDown() throws Exception {
+    searcher.close();
+    reader.close();
+    dir.close();
+    super.tearDown();
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/TestBooleanQuery.java b/lucene/backwards/src/test/org/apache/lucene/search/TestBooleanQuery.java
new file mode 100644
index 0000000..eea83e8
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/TestBooleanQuery.java
@@ -0,0 +1,158 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.WhitespaceAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.MultiReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.queryParser.QueryParser;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+
+public class TestBooleanQuery extends LuceneTestCase {
+  
+  public void testEquality() throws Exception {
+    BooleanQuery bq1 = new BooleanQuery();
+    bq1.add(new TermQuery(new Term("field", "value1")), BooleanClause.Occur.SHOULD);
+    bq1.add(new TermQuery(new Term("field", "value2")), BooleanClause.Occur.SHOULD);
+    BooleanQuery nested1 = new BooleanQuery();
+    nested1.add(new TermQuery(new Term("field", "nestedvalue1")), BooleanClause.Occur.SHOULD);
+    nested1.add(new TermQuery(new Term("field", "nestedvalue2")), BooleanClause.Occur.SHOULD);
+    bq1.add(nested1, BooleanClause.Occur.SHOULD);
+
+    BooleanQuery bq2 = new BooleanQuery();
+    bq2.add(new TermQuery(new Term("field", "value1")), BooleanClause.Occur.SHOULD);
+    bq2.add(new TermQuery(new Term("field", "value2")), BooleanClause.Occur.SHOULD);
+    BooleanQuery nested2 = new BooleanQuery();
+    nested2.add(new TermQuery(new Term("field", "nestedvalue1")), BooleanClause.Occur.SHOULD);
+    nested2.add(new TermQuery(new Term("field", "nestedvalue2")), BooleanClause.Occur.SHOULD);
+    bq2.add(nested2, BooleanClause.Occur.SHOULD);
+
+    assertEquals(bq1, bq2);
+  }
+
+  public void testException() {
+    try {
+      BooleanQuery.setMaxClauseCount(0);
+      fail();
+    } catch (IllegalArgumentException e) {
+      // okay
+    }
+  }
+
+  // LUCENE-1630
+  public void testNullOrSubScorer() throws Throwable {
+    Directory dir = newDirectory();
+    RandomIndexWriter w = new RandomIndexWriter(random, dir);
+    Document doc = new Document();
+    doc.add(newField("field", "a b c d", Field.Store.NO, Field.Index.ANALYZED));
+    w.addDocument(doc);
+
+    IndexReader r = w.getReader();
+    IndexSearcher s = newSearcher(r);
+    BooleanQuery q = new BooleanQuery();
+    q.add(new TermQuery(new Term("field", "a")), BooleanClause.Occur.SHOULD);
+
+    // LUCENE-2617: make sure that a term not in the index still contributes to the score via coord factor
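+    // (under DefaultSimilarity the coord factor is overlap/maxOverlap, so the added
+    // non-matching zero-boost clause halves the score: 1 of 2 clauses match; with the
+    // MUST clause added later, 2 of 3 match, giving the 2/3 factor)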
+    float score = s.search(q, 10).getMaxScore();
+    Query subQuery = new TermQuery(new Term("field", "not_in_index"));
+    subQuery.setBoost(0);
+    q.add(subQuery, BooleanClause.Occur.SHOULD);
+    float score2 = s.search(q, 10).getMaxScore();
+    assertEquals(score*.5, score2, 1e-6);
+
+    // now test BooleanScorer2
+    subQuery = new TermQuery(new Term("field", "b"));
+    subQuery.setBoost(0);
+    q.add(subQuery, BooleanClause.Occur.MUST);
+    score2 = s.search(q, 10).getMaxScore();
+    assertEquals(score*(2.0/3), score2, 1e-6);
+ 
+    // PhraseQuery w/ no terms added returns a null scorer
+    PhraseQuery pq = new PhraseQuery();
+    q.add(pq, BooleanClause.Occur.SHOULD);
+    assertEquals(1, s.search(q, 10).totalHits);
+
+    // A required clause which returns null scorer should return null scorer to
+    // IndexSearcher.
+    q = new BooleanQuery();
+    pq = new PhraseQuery();
+    q.add(new TermQuery(new Term("field", "a")), BooleanClause.Occur.SHOULD);
+    q.add(pq, BooleanClause.Occur.MUST);
+    assertEquals(0, s.search(q, 10).totalHits);
+
+    DisjunctionMaxQuery dmq = new DisjunctionMaxQuery(1.0f);
+    dmq.add(new TermQuery(new Term("field", "a")));
+    dmq.add(pq);
+    assertEquals(1, s.search(dmq, 10).totalHits);
+    
+    s.close();
+    r.close();
+    w.close();
+    dir.close();
+  }
+  
+  public void testDeMorgan() throws Exception {
+    Directory dir1 = newDirectory();
+    RandomIndexWriter iw1 = new RandomIndexWriter(random, dir1);
+    Document doc1 = new Document();
+    doc1.add(newField("field", "foo bar", Field.Index.ANALYZED));
+    iw1.addDocument(doc1);
+    IndexReader reader1 = iw1.getReader();
+    iw1.close();
+    
+    Directory dir2 = newDirectory();
+    RandomIndexWriter iw2 = new RandomIndexWriter(random, dir2);
+    Document doc2 = new Document();
+    doc2.add(newField("field", "foo baz", Field.Index.ANALYZED));
+    iw2.addDocument(doc2);
+    IndexReader reader2 = iw2.getReader();
+    iw2.close();
+    
+    QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer(random));
+    qp.setMultiTermRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE);
+    
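+    // both documents contain "foo" plus a term matching ba* ("bar", "baz"), so the
+    // prohibited wildcard clause should knock out every hit on either sub-reader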
+    MultiReader multireader = new MultiReader(reader1, reader2);
+    IndexSearcher searcher = newSearcher(multireader);
+    assertEquals(0, searcher.search(qp.parse("+foo -ba*"), 10).totalHits);
+    searcher.close();
+    
+    final ExecutorService es = Executors.newCachedThreadPool();
+    searcher = new IndexSearcher(multireader, es);
+    if (VERBOSE)
+      System.out.println("rewritten form: " + searcher.rewrite(qp.parse("+foo -ba*")));
+    assertEquals(0, searcher.search(qp.parse("+foo -ba*"), 10).totalHits);
+    es.shutdown();
+    es.awaitTermination(1, TimeUnit.SECONDS);
+
+    multireader.close();
+    reader1.close();
+    reader2.close();
+    dir1.close();
+    dir2.close();
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/TestBooleanScorer.java b/lucene/backwards/src/test/org/apache/lucene/search/TestBooleanScorer.java
new file mode 100644
index 0000000..f4ce78d
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/TestBooleanScorer.java
@@ -0,0 +1,93 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.Arrays;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.store.Directory;
+
+import org.apache.lucene.util.LuceneTestCase;
+
+public class TestBooleanScorer extends LuceneTestCase
+{
+  private static final String FIELD = "category";
+  
+  public void testMethod() throws Exception {
+    Directory directory = newDirectory();
+
+    String[] values = new String[] { "1", "2", "3", "4" };
+
+    RandomIndexWriter writer = new RandomIndexWriter(random, directory);
+    for (int i = 0; i < values.length; i++) {
+      Document doc = new Document();
+      doc.add(newField(FIELD, values[i], Field.Store.YES, Field.Index.NOT_ANALYZED));
+      writer.addDocument(doc);
+    }
+    IndexReader ir = writer.getReader();
+    writer.close();
+
+    BooleanQuery booleanQuery1 = new BooleanQuery();
+    booleanQuery1.add(new TermQuery(new Term(FIELD, "1")), BooleanClause.Occur.SHOULD);
+    booleanQuery1.add(new TermQuery(new Term(FIELD, "2")), BooleanClause.Occur.SHOULD);
+
+    BooleanQuery query = new BooleanQuery();
+    query.add(booleanQuery1, BooleanClause.Occur.MUST);
+    query.add(new TermQuery(new Term(FIELD, "9")), BooleanClause.Occur.MUST_NOT);
+
+    IndexSearcher indexSearcher = newSearcher(ir);
+    ScoreDoc[] hits = indexSearcher.search(query, null, 1000).scoreDocs;
+    assertEquals("Number of matched documents", 2, hits.length);
+    indexSearcher.close();
+    ir.close();
+    directory.close();
+  }
+  
+  public void testEmptyBucketWithMoreDocs() throws Exception {
+    // This test checks the logic of nextDoc() when all sub scorers have docs
+    // beyond the first bucket (for example). Currently, the code relies on the
+    // 'more' variable to work properly, and this test ensures that if the logic
+    // changes, we have a test to back it up.
+    
+    Similarity sim = Similarity.getDefault();
+    Scorer[] scorers = new Scorer[] {new Scorer(sim) {
+      private int doc = -1;
+      @Override public float score() throws IOException { return 0; }
+      @Override public int docID() { return doc; }
+      
+      @Override public int nextDoc() throws IOException {
+        return doc = doc == -1 ? 3000 : NO_MORE_DOCS;
+      }
+
+      @Override public int advance(int target) throws IOException {
+        return doc = target <= 3000 ? 3000 : NO_MORE_DOCS;
+      }
+      
+    }};
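+    // the sub-scorer's first doc (3000) presumably falls beyond BooleanScorer's
+    // first 2048-doc bucket window, which is the situation described above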
+    BooleanScorer bs = new BooleanScorer(null, false, sim, 1, Arrays.asList(scorers), null, scorers.length);
+    
+    assertEquals("should have received 3000", 3000, bs.nextDoc());
+    assertEquals("should have received NO_MORE_DOCS", DocIdSetIterator.NO_MORE_DOCS, bs.nextDoc());
+  }
+
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/TestCachingCollector.java b/lucene/backwards/src/test/org/apache/lucene/search/TestCachingCollector.java
new file mode 100755
index 0000000..66ce1d2
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/TestCachingCollector.java
@@ -0,0 +1,184 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.util.LuceneTestCase;
+
+import java.io.IOException;
+
+public class TestCachingCollector extends LuceneTestCase {
+
+  private static final double ONE_BYTE = 1.0 / (1024 * 1024); // 1 byte out of MB
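+  // (the memory limit passed to CachingCollector.create is in MB, so e.g.
+  // 50 * ONE_BYTE below expresses a budget of roughly 50 bytes)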
+  
+  private static class MockScorer extends Scorer {
+    
+    private MockScorer() {
+      super((Weight) null);
+    }
+    
+    @Override
+    public float score() throws IOException { return 0; }
+
+    @Override
+    public int docID() { return 0; }
+
+    @Override
+    public int nextDoc() throws IOException { return 0; }
+
+    @Override
+    public int advance(int target) throws IOException { return 0; }
+    
+  }
+  
+  private static class NoOpCollector extends Collector {
+
+    private final boolean acceptDocsOutOfOrder;
+    
+    public NoOpCollector(boolean acceptDocsOutOfOrder) {
+      this.acceptDocsOutOfOrder = acceptDocsOutOfOrder;
+    }
+    
+    @Override
+    public void setScorer(Scorer scorer) throws IOException {}
+
+    @Override
+    public void collect(int doc) throws IOException {}
+
+    @Override
+    public void setNextReader(IndexReader reader, int docBase) throws IOException {}
+
+    @Override
+    public boolean acceptsDocsOutOfOrder() {
+      return acceptDocsOutOfOrder;
+    }
+    
+  }
+
+  public void testBasic() throws Exception {
+    for (boolean cacheScores : new boolean[] { false, true }) {
+      CachingCollector cc = CachingCollector.create(new NoOpCollector(false), cacheScores, 1.0);
+      cc.setScorer(new MockScorer());
+      
+      // collect 1000 docs
+      for (int i = 0; i < 1000; i++) {
+        cc.collect(i);
+      }
+      
+      // now replay them
+      cc.replay(new Collector() {
+        int prevDocID = -1;
+        
+        @Override
+        public void setScorer(Scorer scorer) throws IOException {}
+        
+        @Override
+        public void setNextReader(IndexReader reader, int docBase) throws IOException {}
+        
+        @Override
+        public void collect(int doc) throws IOException {
+          assertEquals(prevDocID + 1, doc);
+          prevDocID = doc;
+        }
+        
+        @Override
+        public boolean acceptsDocsOutOfOrder() {
+          return false;
+        }
+      });
+    }
+  }
+  
+  public void testIllegalStateOnReplay() throws Exception {
+    CachingCollector cc = CachingCollector.create(new NoOpCollector(false), true, 50 * ONE_BYTE);
+    cc.setScorer(new MockScorer());
+    
+    // collect 130 docs; this should be enough to trigger the cache abort.
+    for (int i = 0; i < 130; i++) {
+      cc.collect(i);
+    }
+    
+    assertFalse("CachingCollector should not be cached due to low memory limit", cc.isCached());
+    
+    try {
+      cc.replay(new NoOpCollector(false));
+      fail("replay should fail if CachingCollector is not cached");
+    } catch (IllegalStateException e) {
+      // expected
+    }
+  }
+  
+  public void testIllegalCollectorOnReplay() throws Exception {
+    // tests that the Collector passed to replay() has an out-of-order mode that
+    // is compatible with the Collector passed to the ctor
+    
+    // 'src' Collector does not support out-of-order
+    CachingCollector cc = CachingCollector.create(new NoOpCollector(false), true, 50 * ONE_BYTE);
+    cc.setScorer(new MockScorer());
+    for (int i = 0; i < 10; i++) cc.collect(i);
+    cc.replay(new NoOpCollector(true)); // this call should not fail
+    cc.replay(new NoOpCollector(false)); // this call should not fail
+
+    // 'src' Collector supports out-of-order
+    cc = CachingCollector.create(new NoOpCollector(true), true, 50 * ONE_BYTE);
+    cc.setScorer(new MockScorer());
+    for (int i = 0; i < 10; i++) cc.collect(i);
+    cc.replay(new NoOpCollector(true)); // this call should not fail
+    try {
+      cc.replay(new NoOpCollector(false)); // this call should fail
+      fail("should have failed if an in-order Collector was given to replay(), " +
+           "while CachingCollector was initialized with out-of-order collection");
+    } catch (IllegalArgumentException e) {
+      // ok
+    }
+  }
+  
+  public void testCachedArraysAllocation() throws Exception {
+    // tests the cached arrays allocation -- if the 'nextLength' was too high,
+    // caching would terminate even if a smaller length would suffice.
+    
+    // set RAM limit enough for 150 docs + random(10000)
+    int numDocs = random.nextInt(10000) + 150;
+    for (boolean cacheScores : new boolean[] { false, true }) {
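+      // 4 bytes per cached int doc id, plus another 4 per cached float score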
+      int bytesPerDoc = cacheScores ? 8 : 4;
+      CachingCollector cc = CachingCollector.create(new NoOpCollector(false),
+          cacheScores, bytesPerDoc * ONE_BYTE * numDocs);
+      cc.setScorer(new MockScorer());
+      for (int i = 0; i < numDocs; i++) cc.collect(i);
+      assertTrue(cc.isCached());
+      
+      // one document beyond the budget should terminate caching
+      cc.collect(numDocs);
+      assertFalse(cc.isCached());
+    }
+  }
+
+  public void testNoWrappedCollector() throws Exception {
+    for (boolean cacheScores : new boolean[] { false, true }) {
+      // create w/ null wrapped collector, and test that the methods work
+      CachingCollector cc = CachingCollector.create(true, cacheScores, 50 * ONE_BYTE);
+      cc.setNextReader(null, 0);
+      cc.setScorer(new MockScorer());
+      cc.collect(0);
+      
+      assertTrue(cc.isCached());
+      cc.replay(new NoOpCollector(true));
+    }
+  }
+  
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/TestCachingSpanFilter.java b/lucene/backwards/src/test/org/apache/lucene/search/TestCachingSpanFilter.java
new file mode 100644
index 0000000..edd308a
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/TestCachingSpanFilter.java
@@ -0,0 +1,158 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.SerialMergeScheduler;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.spans.SpanTermQuery;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util._TestUtil;
+
+public class TestCachingSpanFilter extends LuceneTestCase {
+
+  public void testEnforceDeletions() throws Exception {
+    Directory dir = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(
+        random,
+        dir,
+        newIndexWriterConfig(random, TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+            setMergeScheduler(new SerialMergeScheduler()).
+            // asserts below require no unexpected merges:
+            setMergePolicy(newLogMergePolicy(10))
+    );
+
+    // NOTE: cannot use writer.getReader because RIW (on
+    // flipping a coin) may give us a newly opened reader,
+    // but we use .reopen on this reader below and expect to
+    // (must) get an NRT reader:
+    IndexReader reader = IndexReader.open(writer.w, true);
+    // same reason we don't wrap?
+    IndexSearcher searcher = newSearcher(reader, false);
+
+    // add a doc, refresh the reader, and check that it's there
+    Document doc = new Document();
+    doc.add(newField("id", "1", Field.Store.YES, Field.Index.NOT_ANALYZED));
+    writer.addDocument(doc);
+
+    reader = refreshReader(reader);
+    searcher.close();
+    searcher = newSearcher(reader, false);
+
+    TopDocs docs = searcher.search(new MatchAllDocsQuery(), 1);
+    assertEquals("Should find a hit...", 1, docs.totalHits);
+
+    final SpanFilter startFilter = new SpanQueryFilter(new SpanTermQuery(new Term("id", "1")));
+
+    // ignore deletions
+    CachingSpanFilter filter = new CachingSpanFilter(startFilter, CachingWrapperFilter.DeletesMode.IGNORE);
+        
+    docs = searcher.search(new MatchAllDocsQuery(), filter, 1);
+    assertEquals("[query + filter] Should find a hit...", 1, docs.totalHits);
+    ConstantScoreQuery constantScore = new ConstantScoreQuery(filter);
+    docs = searcher.search(constantScore, 1);
+    assertEquals("[just filter] Should find a hit...", 1, docs.totalHits);
+
+    // now delete the doc, refresh the reader, and see that
+    // it's not there
+    _TestUtil.keepFullyDeletedSegments(writer.w);
+    writer.deleteDocuments(new Term("id", "1"));
+
+    reader = refreshReader(reader);
+    searcher.close();
+    searcher = newSearcher(reader, false);
+
+    docs = searcher.search(new MatchAllDocsQuery(), filter, 1);
+    assertEquals("[query + filter] Should *not* find a hit...", 0, docs.totalHits);
+
+    docs = searcher.search(constantScore, 1);
+    assertEquals("[just filter] Should find a hit...", 1, docs.totalHits);
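+    // with DeletesMode.IGNORE the cached doc id set presumably still contains the
+    // deleted doc, so the filter-only ConstantScoreQuery keeps finding it, while the
+    // query+filter search above is rescued by the query respecting deletions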
+
+
+    // force cache to regenerate:
+    filter = new CachingSpanFilter(startFilter, CachingWrapperFilter.DeletesMode.RECACHE);
+
+    writer.addDocument(doc);
+    reader = refreshReader(reader);
+    searcher.close();
+    searcher = newSearcher(reader, false);
+        
+    docs = searcher.search(new MatchAllDocsQuery(), filter, 1);
+    assertEquals("[query + filter] Should find a hit...", 1, docs.totalHits);
+
+    constantScore = new ConstantScoreQuery(filter);
+    docs = searcher.search(constantScore, 1);
+    assertEquals("[just filter] Should find a hit...", 1, docs.totalHits);
+
+    // NOTE: important to hold ref here so GC doesn't clear
+    // the cache entry!  Else the assert below may sometimes
+    // fail:
+    IndexReader oldReader = reader;
+
+    // make sure we get a cache hit when we reopen readers
+    // that had no new deletions
+    reader = refreshReader(reader);
+    assertTrue(reader != oldReader);
+    searcher.close();
+    searcher = newSearcher(reader, false);
+    int missCount = filter.missCount;
+    docs = searcher.search(constantScore, 1);
+    assertEquals("[just filter] Should find a hit...", 1, docs.totalHits);
+    assertEquals(missCount, filter.missCount);
+
+    // now delete the doc, refresh the reader, and see that it's not there
+    writer.deleteDocuments(new Term("id", "1"));
+
+    reader = refreshReader(reader);
+    searcher.close();
+    searcher = newSearcher(reader, false);
+
+    docs = searcher.search(new MatchAllDocsQuery(), filter, 1);
+    assertEquals("[query + filter] Should *not* find a hit...", 0, docs.totalHits);
+
+    docs = searcher.search(constantScore, 1);
+    assertEquals("[just filter] Should *not* find a hit...", 0, docs.totalHits);
+
+    // NOTE: silliness to make sure JRE does not optimize
+    // away our holding onto oldReader to prevent
+    // CachingWrapperFilter's WeakHashMap from dropping the
+    // entry:
+    assertTrue(oldReader != null);
+
+    searcher.close();
+    writer.close();
+    reader.close();
+    dir.close();
+  }
+
+  private static IndexReader refreshReader(IndexReader reader) throws IOException {
+    IndexReader oldReader = reader;
+    reader = reader.reopen();
+    if (reader != oldReader) {
+      oldReader.close();
+    }
+    return reader;
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/TestCachingWrapperFilter.java b/lucene/backwards/src/test/org/apache/lucene/search/TestCachingWrapperFilter.java
new file mode 100644
index 0000000..6cb28af
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/TestCachingWrapperFilter.java
@@ -0,0 +1,307 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.SerialMergeScheduler;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.FixedBitSet;
+import org.apache.lucene.util._TestUtil;
+
+public class TestCachingWrapperFilter extends LuceneTestCase {
+
+  public void testCachingWorks() throws Exception {
+    Directory dir = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random, dir);
+    writer.close();
+
+    IndexReader reader = IndexReader.open(dir, true);
+
+    MockFilter filter = new MockFilter();
+    CachingWrapperFilter cacher = new CachingWrapperFilter(filter);
+
+    // first time, nested filter is called
+    cacher.getDocIdSet(reader);
+    assertTrue("first time", filter.wasCalled());
+
+    // make sure no exception if cache is holding the wrong docIdSet
+    cacher.getDocIdSet(reader);
+
+    // second time, nested filter should not be called
+    filter.clear();
+    cacher.getDocIdSet(reader);
+    assertFalse("second time", filter.wasCalled());
+
+    reader.close();
+    dir.close();
+  }
+  
+  public void testNullDocIdSet() throws Exception {
+    Directory dir = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random, dir);
+    writer.close();
+
+    IndexReader reader = IndexReader.open(dir, true);
+
+    final Filter filter = new Filter() {
+      @Override
+      public DocIdSet getDocIdSet(IndexReader reader) {
+        return null;
+      }
+    };
+    CachingWrapperFilter cacher = new CachingWrapperFilter(filter);
+
+    // the caching filter should return the empty set constant
+    assertSame(DocIdSet.EMPTY_DOCIDSET, cacher.getDocIdSet(reader));
+    
+    reader.close();
+    dir.close();
+  }
+  
+  public void testNullDocIdSetIterator() throws Exception {
+    Directory dir = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random, dir);
+    writer.close();
+
+    IndexReader reader = IndexReader.open(dir, true);
+
+    final Filter filter = new Filter() {
+      @Override
+      public DocIdSet getDocIdSet(IndexReader reader) {
+        return new DocIdSet() {
+          @Override
+          public DocIdSetIterator iterator() {
+            return null;
+          }
+        };
+      }
+    };
+    CachingWrapperFilter cacher = new CachingWrapperFilter(filter);
+
+    // the caching filter should return the empty set constant
+    assertSame(DocIdSet.EMPTY_DOCIDSET, cacher.getDocIdSet(reader));
+    
+    reader.close();
+    dir.close();
+  }
+  
+  private static void assertDocIdSetCacheable(IndexReader reader, Filter filter, boolean shouldCacheable) throws IOException {
+    final CachingWrapperFilter cacher = new CachingWrapperFilter(filter);
+    final DocIdSet originalSet = filter.getDocIdSet(reader.getSequentialSubReaders()[0]);
+    final DocIdSet cachedSet = cacher.getDocIdSet(reader.getSequentialSubReaders()[0]);
+    assertTrue(cachedSet.isCacheable());
+    assertEquals(shouldCacheable, originalSet.isCacheable());
+    //System.out.println("Original: "+originalSet.getClass().getName()+" -- cached: "+cachedSet.getClass().getName());
+    if (originalSet.isCacheable()) {
+      assertEquals("Cached DocIdSet must be of the same class as the uncached one, if cacheable", originalSet.getClass(), cachedSet.getClass());
+    } else {
+      assertTrue("Cached DocIdSet must be a FixedBitSet if the original one was not cacheable (got " + cachedSet + ")", cachedSet instanceof FixedBitSet || cachedSet == DocIdSet.EMPTY_DOCIDSET);
+    }
+  }
+  
+  public void testIsCacheAble() throws Exception {
+    Directory dir = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random, dir);
+    writer.addDocument(new Document());
+    writer.close();
+
+    IndexReader reader = IndexReader.open(dir, true);
+
+    // not cacheable:
+    assertDocIdSetCacheable(reader, new QueryWrapperFilter(new TermQuery(new Term("test","value"))), false);
+    // returns default empty docidset, always cacheable:
+    assertDocIdSetCacheable(reader, NumericRangeFilter.newIntRange("test", Integer.valueOf(10000), Integer.valueOf(-10000), true, true), true);
+    // is cacheable:
+    assertDocIdSetCacheable(reader, FieldCacheRangeFilter.newIntRange("test", Integer.valueOf(10), Integer.valueOf(20), true, true), true);
+    // a fixedbitset filter is always cacheable
+    assertDocIdSetCacheable(reader, new Filter() {
+      @Override
+      public DocIdSet getDocIdSet(IndexReader reader) {
+        return new FixedBitSet(reader.maxDoc());
+      }
+    }, true);
+
+    reader.close();
+    dir.close();
+  }
+
+  public void testEnforceDeletions() throws Exception {
+    Directory dir = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(
+        random,
+        dir,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+            setMergeScheduler(new SerialMergeScheduler()).
+            // asserts below require no unexpected merges:
+            setMergePolicy(newLogMergePolicy(10))
+    );
+
+    // NOTE: cannot use writer.getReader because RIW (on
+    // flipping a coin) may give us a newly opened reader,
+    // but we use .reopen on this reader below and expect to
+    // (must) get an NRT reader:
+    IndexReader reader = IndexReader.open(writer.w, true);
+    // same reason we don't wrap?
+    IndexSearcher searcher = newSearcher(reader, false);
+
+    // add a doc, refresh the reader, and check that it's there
+    Document doc = new Document();
+    doc.add(newField("id", "1", Field.Store.YES, Field.Index.NOT_ANALYZED));
+    writer.addDocument(doc);
+
+    reader = refreshReader(reader);
+    searcher.close();
+    searcher = newSearcher(reader, false);
+
+    TopDocs docs = searcher.search(new MatchAllDocsQuery(), 1);
+    assertEquals("Should find a hit...", 1, docs.totalHits);
+
+    final Filter startFilter = new QueryWrapperFilter(new TermQuery(new Term("id", "1")));
+
+    // ignore deletions
+    CachingWrapperFilter filter = new CachingWrapperFilter(startFilter, CachingWrapperFilter.DeletesMode.IGNORE);
+        
+    docs = searcher.search(new MatchAllDocsQuery(), filter, 1);
+    assertEquals("[query + filter] Should find a hit...", 1, docs.totalHits);
+    ConstantScoreQuery constantScore = new ConstantScoreQuery(filter);
+    docs = searcher.search(constantScore, 1);
+    assertEquals("[just filter] Should find a hit...", 1, docs.totalHits);
+
+    // now delete the doc, refresh the reader, and see that it's not there
+    _TestUtil.keepFullyDeletedSegments(writer.w);
+    writer.deleteDocuments(new Term("id", "1"));
+
+    reader = refreshReader(reader);
+    searcher.close();
+    searcher = newSearcher(reader, false);
+
+    docs = searcher.search(new MatchAllDocsQuery(), filter, 1);
+    assertEquals("[query + filter] Should *not* find a hit...", 0, docs.totalHits);
+
+    docs = searcher.search(constantScore, 1);
+    assertEquals("[just filter] Should find a hit...", 1, docs.totalHits);
+
+
+    // force cache to regenerate:
+    filter = new CachingWrapperFilter(startFilter, CachingWrapperFilter.DeletesMode.RECACHE);
+
+    writer.addDocument(doc);
+
+    reader = refreshReader(reader);
+    searcher.close();
+    searcher = newSearcher(reader, false);
+        
+    docs = searcher.search(new MatchAllDocsQuery(), filter, 1);
+
+    assertEquals("[query + filter] Should find a hit...", 1, docs.totalHits);
+
+    constantScore = new ConstantScoreQuery(filter);
+    docs = searcher.search(constantScore, 1);
+    assertEquals("[just filter] Should find a hit...", 1, docs.totalHits);
+
+    // NOTE: important to hold ref here so GC doesn't clear
+    // the cache entry!  Else the assert below may sometimes
+    // fail:
+    IndexReader oldReader = reader;
+
+    // make sure we get a cache hit when we reopen a reader
+    // that had no change in deletions
+    reader = refreshReader(reader);
+    assertTrue(reader != oldReader);
+    searcher.close();
+    searcher = newSearcher(reader, false);
+    int missCount = filter.missCount;
+    docs = searcher.search(constantScore, 1);
+    assertEquals("[just filter] Should find a hit...", 1, docs.totalHits);
+    assertEquals(missCount, filter.missCount);
+
+    // now delete the doc, refresh the reader, and see that it's not there
+    writer.deleteDocuments(new Term("id", "1"));
+
+    reader = refreshReader(reader);
+    searcher.close();
+    searcher = newSearcher(reader, false);
+
+    missCount = filter.missCount;
+    docs = searcher.search(new MatchAllDocsQuery(), filter, 1);
+    assertEquals(missCount+1, filter.missCount);
+    assertEquals("[query + filter] Should *not* find a hit...", 0, docs.totalHits);
+    docs = searcher.search(constantScore, 1);
+    assertEquals("[just filter] Should *not* find a hit...", 0, docs.totalHits);
+
+
+    // apply deletions dynamically
+    filter = new CachingWrapperFilter(startFilter, CachingWrapperFilter.DeletesMode.DYNAMIC);
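+    // (DYNAMIC presumably re-applies current deletions to the cached set at search
+    // time, so the deleted doc disappears below without re-caching or a cache miss)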
+
+    writer.addDocument(doc);
+    reader = refreshReader(reader);
+    searcher.close();
+    searcher = newSearcher(reader, false);
+        
+    docs = searcher.search(new MatchAllDocsQuery(), filter, 1);
+    assertEquals("[query + filter] Should find a hit...", 1, docs.totalHits);
+    constantScore = new ConstantScoreQuery(filter);
+    docs = searcher.search(constantScore, 1);
+    assertEquals("[just filter] Should find a hit...", 1, docs.totalHits);
+
+    // now delete the doc, refresh the reader, and see that it's not there
+    writer.deleteDocuments(new Term("id", "1"));
+
+    reader = refreshReader(reader);
+    searcher.close();
+    searcher = newSearcher(reader, false);
+
+    docs = searcher.search(new MatchAllDocsQuery(), filter, 1);
+    assertEquals("[query + filter] Should *not* find a hit...", 0, docs.totalHits);
+
+    missCount = filter.missCount;
+    docs = searcher.search(constantScore, 1);
+    assertEquals("[just filter] Should *not* find a hit...", 0, docs.totalHits);
+
+    // doesn't count as a miss
+    assertEquals(missCount, filter.missCount);
+
+    // NOTE: silliness to make sure JRE does not optimize
+    // away our holding onto oldReader to prevent
+    // CachingWrapperFilter's WeakHashMap from dropping the
+    // entry:
+    assertTrue(oldReader != null);
+
+    searcher.close();
+    reader.close();
+    writer.close();
+    dir.close();
+  }
+
+  private static IndexReader refreshReader(IndexReader reader) throws IOException {
+    IndexReader oldReader = reader;
+    reader = reader.reopen();
+    if (reader != oldReader) {
+      oldReader.close();
+    }
+    return reader;
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/TestComplexExplanations.java b/lucene/backwards/src/test/org/apache/lucene/search/TestComplexExplanations.java
new file mode 100644
index 0000000..4c4789a
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/TestComplexExplanations.java
@@ -0,0 +1,273 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.search.BooleanClause.Occur;
+import org.apache.lucene.search.spans.*;
+
+/**
+ * TestExplanations subclass that builds up super crazy complex queries
+ * on the assumption that if the explanations work out right for them,
+ * they should work for anything.
+ */
+public class TestComplexExplanations extends TestExplanations {
+
+  /**
+   * Override the Similarity used in our searcher with one that plays
+   * nice with boosts of 0.0
+   */
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    searcher.setSimilarity(createQnorm1Similarity());
+  }
+  
+  @Override
+  public void tearDown() throws Exception {
+    searcher.close();
+    super.tearDown();
+  }
+
+  // must be static for weight serialization tests 
+  private static DefaultSimilarity createQnorm1Similarity() {
+    return new DefaultSimilarity() {
+        @Override
+        public float queryNorm(float sumOfSquaredWeights) {
+          return 1.0f; // / (float) Math.sqrt(1.0f + sumOfSquaredWeights);
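+          // (the default 1/sqrt(sumOfSquaredWeights) would go infinite when every
+          // boost in the query is 0.0, which several tests here rely on)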
+        }
+      };
+  }
+
+  
+  public void test1() throws Exception {
+    
+    BooleanQuery q = new BooleanQuery();
+    
+    q.add(qp.parse("\"w1 w2\"~1"), Occur.MUST);
+    q.add(snear(st("w2"),
+                sor("w5","zz"),
+                4, true),
+          Occur.SHOULD);
+    q.add(snear(sf("w3",2), st("w2"), st("w3"), 5, true),
+          Occur.SHOULD);
+    
+    Query t = new FilteredQuery(qp.parse("xx"),
+                                new ItemizedFilter(new int[] {1,3}));
+    t.setBoost(1000);
+    q.add(t, Occur.SHOULD);
+    
+    t = new ConstantScoreQuery(new ItemizedFilter(new int[] {0,2}));
+    t.setBoost(30);
+    q.add(t, Occur.SHOULD);
+    
+    DisjunctionMaxQuery dm = new DisjunctionMaxQuery(0.2f);
+    dm.add(snear(st("w2"),
+                 sor("w5","zz"),
+                 4, true));
+    dm.add(qp.parse("QQ"));
+    dm.add(qp.parse("xx yy -zz"));
+    dm.add(qp.parse("-xx -w1"));
+
+    DisjunctionMaxQuery dm2 = new DisjunctionMaxQuery(0.5f);
+    dm2.add(qp.parse("w1"));
+    dm2.add(qp.parse("w2"));
+    dm2.add(qp.parse("w3"));
+    dm.add(dm2);
+
+    q.add(dm, Occur.SHOULD);
+
+    BooleanQuery b = new BooleanQuery();
+    b.setMinimumNumberShouldMatch(2);
+    b.add(snear("w1","w2",1,true), Occur.SHOULD);
+    b.add(snear("w2","w3",1,true), Occur.SHOULD);
+    b.add(snear("w1","w3",3,true), Occur.SHOULD);
+
+    q.add(b, Occur.SHOULD);
+    
+    qtest(q, new int[] { 0,1,2 });
+  }
+
+  public void test2() throws Exception {
+    
+    BooleanQuery q = new BooleanQuery();
+    
+    q.add(qp.parse("\"w1 w2\"~1"), Occur.MUST);
+    q.add(snear(st("w2"),
+                sor("w5","zz"),
+                4, true),
+          Occur.SHOULD);
+    q.add(snear(sf("w3",2), st("w2"), st("w3"), 5, true),
+          Occur.SHOULD);
+    
+    Query t = new FilteredQuery(qp.parse("xx"),
+                                new ItemizedFilter(new int[] {1,3}));
+    t.setBoost(1000);
+    q.add(t, Occur.SHOULD);
+    
+    t = new ConstantScoreQuery(new ItemizedFilter(new int[] {0,2}));
+    t.setBoost(-20.0f);
+    q.add(t, Occur.SHOULD);
+    
+    DisjunctionMaxQuery dm = new DisjunctionMaxQuery(0.2f);
+    dm.add(snear(st("w2"),
+                 sor("w5","zz"),
+                 4, true));
+    dm.add(qp.parse("QQ"));
+    dm.add(qp.parse("xx yy -zz"));
+    dm.add(qp.parse("-xx -w1"));
+
+    DisjunctionMaxQuery dm2 = new DisjunctionMaxQuery(0.5f);
+    dm2.add(qp.parse("w1"));
+    dm2.add(qp.parse("w2"));
+    dm2.add(qp.parse("w3"));
+    dm.add(dm2);
+
+    q.add(dm, Occur.SHOULD);
+
+    BooleanQuery b = new BooleanQuery();
+    b.setMinimumNumberShouldMatch(2);
+    b.add(snear("w1","w2",1,true), Occur.SHOULD);
+    b.add(snear("w2","w3",1,true), Occur.SHOULD);
+    b.add(snear("w1","w3",3,true), Occur.SHOULD);
+    b.setBoost(0.0f);
+    
+    q.add(b, Occur.SHOULD);
+    
+    qtest(q, new int[] { 0,1,2 });
+  }
+  
+  // :TODO: we really need more crazy complex cases.
+
+
+  // //////////////////////////////////////////////////////////////////
+
+  // The rest of these aren't that complex, but they are <i>somewhat</i>
+  // complex, and they expose weakness in dealing with queries that match
+  // with scores of 0 wrapped in other queries
+
+  public void testT3() throws Exception {
+    bqtest("w1^0.0", new int[] { 0,1,2,3 });
+  }
+
+  public void testMA3() throws Exception {
+    Query q=new MatchAllDocsQuery();
+    q.setBoost(0);
+    bqtest(q, new int[] { 0,1,2,3 });
+  }
+  
+  public void testFQ5() throws Exception {
+    bqtest(new FilteredQuery(qp.parse("xx^0"),
+                             new ItemizedFilter(new int[] {1,3})),
+           new int[] {3});
+  }
+  
+  public void testCSQ4() throws Exception {
+    Query q = new ConstantScoreQuery(new ItemizedFilter(new int[] {3}));
+    q.setBoost(0);
+    bqtest(q, new int[] {3});
+  }
+  
+  public void testDMQ10() throws Exception {
+    DisjunctionMaxQuery q = new DisjunctionMaxQuery(0.5f);
+    q.add(qp.parse("yy w5^100"));
+    q.add(qp.parse("xx^0"));
+    q.setBoost(0.0f);
+    bqtest(q, new int[] { 0,2,3 });
+  }
+  
+  public void testMPQ7() throws Exception {
+    MultiPhraseQuery q = new MultiPhraseQuery();
+    q.add(ta(new String[] {"w1"}));
+    q.add(ta(new String[] {"w2"}));
+    q.setSlop(1);
+    q.setBoost(0.0f);
+    bqtest(q, new int[] { 0,1,2 });
+  }
+  
+  public void testBQ12() throws Exception {
+    // NOTE: using qtest not bqtest
+    qtest("w1 w2^0.0", new int[] { 0,1,2,3 });
+  }
+  public void testBQ13() throws Exception {
+    // NOTE: using qtest not bqtest
+    qtest("w1 -w5^0.0", new int[] { 1,2,3 });
+  }
+  public void testBQ18() throws Exception {
+    // NOTE: using qtest not bqtest
+    qtest("+w1^0.0 w2", new int[] { 0,1,2,3 });
+  }
+  public void testBQ21() throws Exception {
+    bqtest("(+w1 w2)^0.0", new int[] { 0,1,2,3 });
+  }
+  public void testBQ22() throws Exception {
+    bqtest("(+w1^0.0 w2)^0.0", new int[] { 0,1,2,3 });
+  }
+
+  public void testST3() throws Exception {
+    SpanQuery q = st("w1");
+    q.setBoost(0);
+    bqtest(q, new int[] {0,1,2,3});
+  }
+  public void testST6() throws Exception {
+    SpanQuery q = st("xx");
+    q.setBoost(0);
+    qtest(q, new int[] {2,3});
+  }
+
+  public void testSF3() throws Exception {
+    SpanQuery q = sf(("w1"),1);
+    q.setBoost(0);
+    bqtest(q, new int[] {0,1,2,3});
+  }
+  public void testSF7() throws Exception {
+    SpanQuery q = sf(("xx"),3);
+    q.setBoost(0);
+    bqtest(q, new int[] {2,3});
+  }
+  
+  public void testSNot3() throws Exception {
+    SpanQuery q = snot(sf("w1",10),st("QQ"));
+    q.setBoost(0);
+    bqtest(q, new int[] {0,1,2,3});
+  }
+  public void testSNot6() throws Exception {
+    SpanQuery q = snot(sf("w1",10),st("xx"));
+    q.setBoost(0);
+    bqtest(q, new int[] {0,1,2,3});
+  }
+
+  public void testSNot8() throws Exception {
+    // NOTE: using qtest not bqtest
+    SpanQuery f = snear("w1","w3",10,true);
+    f.setBoost(0);
+    SpanQuery q = snot(f, st("xx"));
+    qtest(q, new int[] {0,1,3});
+  }
+  public void testSNot9() throws Exception {
+    // NOTE: using qtest not bqtest
+    SpanQuery t = st("xx");
+    t.setBoost(0);
+    SpanQuery q = snot(snear("w1","w3",10,true), t);
+    qtest(q, new int[] {0,1,3});
+  }
+
+
+  
+
+  
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/TestComplexExplanationsOfNonMatches.java b/lucene/backwards/src/test/org/apache/lucene/search/TestComplexExplanationsOfNonMatches.java
new file mode 100644
index 0000000..2ca3324
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/TestComplexExplanationsOfNonMatches.java
@@ -0,0 +1,38 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+
+/**
+ * Subclass of TestComplexExplanations that verifies non-matches.
+ */
+public class TestComplexExplanationsOfNonMatches
+  extends TestComplexExplanations {
+
+  /**
+   * Overrides superclass to ignore matches and focus on non-matches
+   *
+   * @see CheckHits#checkNoMatchExplanations
+   */
+  @Override
+  public void qtest(Query q, int[] expDocNrs) throws Exception {
+    CheckHits.checkNoMatchExplanations(q, FIELD, searcher, expDocNrs);
+  }
+    
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/TestConstantScoreQuery.java b/lucene/backwards/src/test/org/apache/lucene/search/TestConstantScoreQuery.java
new file mode 100644
index 0000000..3e8341a
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/TestConstantScoreQuery.java
@@ -0,0 +1,132 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+
+import java.io.IOException;
+
+/** This class only tests some basic functionality in CSQ; the main parts are mostly
+ * covered by the MultiTermQuery tests, and explanations seem to be tested in TestExplanations. */
+public class TestConstantScoreQuery extends LuceneTestCase {
+  
+  public void testCSQ() throws Exception {
+    final Query q1 = new ConstantScoreQuery(new TermQuery(new Term("a", "b")));
+    final Query q2 = new ConstantScoreQuery(new TermQuery(new Term("a", "c")));
+    final Query q3 = new ConstantScoreQuery(new TermRangeFilter("a", "b", "c", true, true));
+    QueryUtils.check(q1);
+    QueryUtils.check(q2);
+    QueryUtils.checkEqual(q1,q1);
+    QueryUtils.checkEqual(q2,q2);
+    QueryUtils.checkEqual(q3,q3);
+    QueryUtils.checkUnequal(q1,q2);
+    QueryUtils.checkUnequal(q2,q3);
+    QueryUtils.checkUnequal(q1,q3);
+    QueryUtils.checkUnequal(q1, new TermQuery(new Term("a", "b")));
+  }
+  
+  private void checkHits(Searcher searcher, Query q, final float expectedScore, final String scorerClassName, final String innerScorerClassName) throws IOException {
+    final int[] count = new int[1];
+    searcher.search(q, new Collector() {
+      private Scorer scorer;
+    
+      @Override
+      public void setScorer(Scorer scorer) {
+        this.scorer = scorer;
+        assertEquals("Scorer is implemented by wrong class", scorerClassName, scorer.getClass().getName());
+        if (innerScorerClassName != null && scorer instanceof ConstantScoreQuery.ConstantScorer) {
+          final ConstantScoreQuery.ConstantScorer innerScorer = (ConstantScoreQuery.ConstantScorer) scorer;
+          assertEquals("inner Scorer is implemented by wrong class", innerScorerClassName, innerScorer.docIdSetIterator.getClass().getName());
+        }
+      }
+      
+      @Override
+      public void collect(int doc) throws IOException {
+        assertEquals("Score differs from expected", expectedScore, this.scorer.score());
+        count[0]++;
+      }
+      
+      @Override
+      public void setNextReader(IndexReader reader, int docBase) {
+      }
+      
+      @Override
+      public boolean acceptsDocsOutOfOrder() {
+        return true;
+      }
+    });
+    assertEquals("invalid number of results", 1, count[0]);
+  }
+  
+  public void testWrapped2Times() throws Exception {
+    Directory directory = null;
+    IndexReader reader = null;
+    IndexSearcher searcher = null;
+    try {
+      directory = newDirectory();
+      RandomIndexWriter writer = new RandomIndexWriter (random, directory);
+
+      Document doc = new Document();
+      doc.add(newField("field", "term", Field.Store.NO, Field.Index.NOT_ANALYZED));
+      writer.addDocument(doc);
+
+      reader = writer.getReader();
+      writer.close();
+      searcher = newSearcher(reader);
+      
+      // set a similarity that does not normalize our boost away
+      searcher.setSimilarity(new DefaultSimilarity() {
+        @Override
+        public float queryNorm(float sumOfSquaredWeights) {
+          return 1.0f;
+        }
+      });
+      
+      final Query csq1 = new ConstantScoreQuery(new TermQuery(new Term ("field", "term")));
+      csq1.setBoost(2.0f);
+      final Query csq2 = new ConstantScoreQuery(csq1);
+      csq2.setBoost(5.0f);
+      
+      final BooleanQuery bq = new BooleanQuery();
+      bq.add(csq1, BooleanClause.Occur.SHOULD);
+      bq.add(csq2, BooleanClause.Occur.SHOULD);
+      
+      final Query csqbq = new ConstantScoreQuery(bq);
+      csqbq.setBoost(17.0f);
+      
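+      // expected scores: each ConstantScoreQuery scores exactly its own boost, the BooleanQuery sums the
+      // two constant scores (2+5), and the outermost ConstantScoreQuery replaces that sum with its boost (17)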
+      checkHits(searcher, csq1, csq1.getBoost(), ConstantScoreQuery.ConstantScorer.class.getName(), null);
+      checkHits(searcher, csq2, csq2.getBoost(), ConstantScoreQuery.ConstantScorer.class.getName(), ConstantScoreQuery.ConstantScorer.class.getName());
+      
+      // for the combined BQ, the scorer should always be BooleanScorer's BucketScorer, because our scorer supports out-of-order collection!
+      final String bucketScorerClass = BooleanScorer.class.getName() + "$BucketScorer";
+      checkHits(searcher, bq, csq1.getBoost() + csq2.getBoost(), bucketScorerClass, null);
+      checkHits(searcher, csqbq, csqbq.getBoost(), ConstantScoreQuery.ConstantScorer.class.getName(), bucketScorerClass);
+    } finally {
+      if (searcher != null) searcher.close();
+      if (reader != null) reader.close();
+      if (directory != null) directory.close();
+    }
+  }
+  
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/TestCustomSearcherSort.java b/lucene/backwards/src/test/org/apache/lucene/search/TestCustomSearcherSort.java
new file mode 100644
index 0000000..e8806fd
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/TestCustomSearcherSort.java
@@ -0,0 +1,264 @@
+package org.apache.lucene.search;
+
+/**
+ * Copyright 2005 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.Calendar;
+import java.util.GregorianCalendar;
+import java.util.Map;
+import java.util.Random;
+import java.util.TreeMap;
+
+import org.apache.lucene.document.DateTools;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+
+/** Unit test for sorting code. */
+public class TestCustomSearcherSort extends LuceneTestCase implements Serializable {
+  
+  private Directory index = null;
+  private IndexReader reader;
+  private Query query = null;
+  // reduced from 20000 to 2000 to speed up test...
+  private int INDEX_SIZE;
+  
+  /**
+   * Create index and query for test cases.
+   */
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    INDEX_SIZE = atLeast(2000);
+    index = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random, index);
+    RandomGen random = new RandomGen(this.random);
+    for (int i = 0; i < INDEX_SIZE; ++i) { // don't decrease; if too low the
+                                           // problem doesn't show up
+      Document doc = new Document();
+      if ((i % 5) != 0) { // some documents must not have an entry in the first
+                          // sort field
+        doc.add(newField("publicationDate_", random.getLuceneDate(),
+            Field.Store.YES, Field.Index.NOT_ANALYZED));
+      }
+      if ((i % 7) == 0) { // some documents to match the query (see below)
+        doc.add(newField("content", "test", Field.Store.YES,
+            Field.Index.ANALYZED));
+      }
+      // every document has a defined 'mandant' field
+      doc.add(newField("mandant", Integer.toString(i % 3), Field.Store.YES,
+          Field.Index.NOT_ANALYZED));
+      writer.addDocument(doc);
+    }
+    reader = writer.getReader();
+    writer.close();
+    query = new TermQuery(new Term("content", "test"));
+  }
+  
+  @Override
+  public void tearDown() throws Exception {
+    reader.close();
+    index.close();
+    super.tearDown();
+  }
+  
+  /**
+   * Run the test using a single CustomSearcher instance.
+   */
+  public void testFieldSortCustomSearcher() throws Exception {
+    // log("Run testFieldSortCustomSearcher");
+    // define the sort criteria
+    Sort custSort = new Sort(
+        new SortField("publicationDate_", SortField.STRING),
+        SortField.FIELD_SCORE);
+    Searcher searcher = new CustomSearcher(reader, 2);
+    // search and check hits
+    matchHits(searcher, custSort);
+  }
+  
+  /**
+   * Run the test using one CustomSearcher wrapped by a MultiSearcher.
+   */
+  public void testFieldSortSingleSearcher() throws Exception {
+    // log("Run testFieldSortSingleSearcher");
+    // define the sort criteria
+    Sort custSort = new Sort(
+        new SortField("publicationDate_", SortField.STRING),
+        SortField.FIELD_SCORE);
+    Searcher searcher = new MultiSearcher(new Searcher[] {new CustomSearcher(
+        reader, 2)});
+    // search and check hits
+    matchHits(searcher, custSort);
+  }
+  
+  /**
+   * Run the test using two CustomSearcher instances.
+   */
+  public void testFieldSortMultiCustomSearcher() throws Exception {
+    // log("Run testFieldSortMultiCustomSearcher");
+    // define the sort criteria
+    Sort custSort = new Sort(
+        new SortField("publicationDate_", SortField.STRING),
+        SortField.FIELD_SCORE);
+    Searcher searcher = new MultiSearcher(new Searchable[] {
+        new CustomSearcher(reader, 0), new CustomSearcher(reader, 2)});
+    // search and check hits
+    matchHits(searcher, custSort);
+  }
+  
+  // make sure the documents returned by the search match the expected list
+  private void matchHits(Searcher searcher, Sort sort) throws IOException {
+    // make a query without sorting first
+    ScoreDoc[] hitsByRank = searcher.search(query, null, Integer.MAX_VALUE).scoreDocs;
+    checkHits(hitsByRank, "Sort by rank: "); // check for duplicates
+    Map<Integer,Integer> resultMap = new TreeMap<Integer,Integer>();
+    // store hits in TreeMap - TreeMap does not allow duplicates; existing
+    // entries are silently overwritten
+    for (int hitid = 0; hitid < hitsByRank.length; ++hitid) {
+      resultMap.put(Integer.valueOf(hitsByRank[hitid].doc), // Key: Lucene
+                                                            // Document ID
+          Integer.valueOf(hitid)); // Value: index in the hits array
+    }
+    
+    // now make a query using the sort criteria
+    ScoreDoc[] resultSort = searcher.search(query, null, Integer.MAX_VALUE,
+        sort).scoreDocs;
+    checkHits(resultSort, "Sort by custom criteria: "); // check for duplicates
+    
+    // besides the sorting both sets of hits must be identical
+    for (int hitid = 0; hitid < resultSort.length; ++hitid) {
+      Integer idHitDate = Integer.valueOf(resultSort[hitid].doc); // document ID
+                                                                  // from sorted
+                                                                  // search
+      if (!resultMap.containsKey(idHitDate)) {
+        log("ID " + idHitDate + " not found. Possibliy a duplicate.");
+      }
+      assertTrue(resultMap.containsKey(idHitDate)); // same ID must be in the
+                                                    // Map from the rank-sorted
+                                                    // search
+      // every hit must appear once in both result sets --> remove it from the
+      // Map.
+      // At the end the Map must be empty!
+      resultMap.remove(idHitDate);
+    }
+    if (resultMap.size() == 0) {
+      // log("All hits matched");
+    } else {
+      log("Couldn't match " + resultMap.size() + " hits.");
+    }
+    assertEquals(resultMap.size(), 0);
+  }
+  
+  /**
+   * Check the hits for duplicates.
+   * 
+   * @param hits
+   */
+  private void checkHits(ScoreDoc[] hits, String prefix) {
+    if (hits != null) {
+      Map<Integer,Integer> idMap = new TreeMap<Integer,Integer>();
+      for (int docnum = 0; docnum < hits.length; ++docnum) {
+        Integer luceneId = null;
+        
+        luceneId = Integer.valueOf(hits[docnum].doc);
+        if (idMap.containsKey(luceneId)) {
+          StringBuilder message = new StringBuilder(prefix);
+          message.append("Duplicate key for hit index = ");
+          message.append(docnum);
+          message.append(", previous index = ");
+          message.append((idMap.get(luceneId)).toString());
+          message.append(", Lucene ID = ");
+          message.append(luceneId);
+          log(message.toString());
+        } else {
+          idMap.put(luceneId, Integer.valueOf(docnum));
+        }
+      }
+    }
+  }
+  
+  // Simply write to console - chosen to be independent of log4j etc
+  private void log(String message) {
+    if (VERBOSE) System.out.println(message);
+  }
+  
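+  /** IndexSearcher that restricts every query to documents whose "mandant" field equals the given switcher value. */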
+  public class CustomSearcher extends IndexSearcher {
+    private int switcher;
+    
+    /**
+     * @param r        the IndexReader to search
+     * @param switcher the "mandant" value to which all queries are restricted
+     */
+    public CustomSearcher(IndexReader r, int switcher) {
+      super(r);
+      this.switcher = switcher;
+    }
+    
+    /*
+     * (non-Javadoc)
+     * 
+     * @see
+     * org.apache.lucene.search.Searchable#search(org.apache.lucene.search.Query
+     * , org.apache.lucene.search.Filter, int, org.apache.lucene.search.Sort)
+     */
+    @Override
+    public TopFieldDocs search(Query query, Filter filter, int nDocs, Sort sort)
+        throws IOException {
+      BooleanQuery bq = new BooleanQuery();
+      bq.add(query, BooleanClause.Occur.MUST);
+      bq.add(new TermQuery(new Term("mandant", Integer.toString(switcher))),
+          BooleanClause.Occur.MUST);
+      return super.search(bq, filter, nDocs, sort);
+    }
+    
+    /*
+     * (non-Javadoc)
+     * 
+     * @see
+     * org.apache.lucene.search.Searchable#search(org.apache.lucene.search.Query
+     * , org.apache.lucene.search.Filter, int)
+     */
+    @Override
+    public TopDocs search(Query query, Filter filter, int nDocs)
+        throws IOException {
+      BooleanQuery bq = new BooleanQuery();
+      bq.add(query, BooleanClause.Occur.MUST);
+      bq.add(new TermQuery(new Term("mandant", Integer.toString(switcher))),
+          BooleanClause.Occur.MUST);
+      return super.search(bq, filter, nDocs);
+    }
+  }
+  
+  private class RandomGen {
+    RandomGen(Random random) {
+      this.random = random;
+    }
+    
+    private Random random;
+    private Calendar base = new GregorianCalendar(1980, 1, 1);
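+    // note: Calendar months are zero-based, so this base date is Feb 1, 1980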
+    
+    // Just to generate some different Lucene Date strings
+    private String getLuceneDate() {
+      return DateTools.timeToString(base.getTimeInMillis() + random.nextInt()
+          - Integer.MIN_VALUE, DateTools.Resolution.DAY);
+    }
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/TestDateFilter.java b/lucene/backwards/src/test/org/apache/lucene/search/TestDateFilter.java
new file mode 100644
index 0000000..1101d22
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/TestDateFilter.java
@@ -0,0 +1,174 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+
+import org.apache.lucene.document.DateTools;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
+
+import java.io.IOException;
+
+/**
+ * DateFilter JUnit tests.
+ * 
+ * 
+ * @version $Revision$
+ */
+public class TestDateFilter extends LuceneTestCase {
+ 
+  /**
+   *
+   */
+  public void testBefore() throws IOException {
+    // create an index
+    Directory indexStore = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random, indexStore);
+    
+    long now = System.currentTimeMillis();
+    
+    Document doc = new Document();
+    // add time that is in the past
+    doc.add(newField("datefield", DateTools.timeToString(now - 1000,
+        DateTools.Resolution.MILLISECOND), Field.Store.YES,
+        Field.Index.NOT_ANALYZED));
+    doc.add(newField("body", "Today is a very sunny day in New York City",
+        Field.Store.YES, Field.Index.ANALYZED));
+    writer.addDocument(doc);
+    
+    IndexReader reader = writer.getReader();
+    writer.close();
+    IndexSearcher searcher = newSearcher(reader);
+    
+    // filter that should preserve matches
+    // DateFilter df1 = DateFilter.Before("datefield", now);
+    TermRangeFilter df1 = new TermRangeFilter("datefield", DateTools
+        .timeToString(now - 2000, DateTools.Resolution.MILLISECOND), DateTools
+        .timeToString(now, DateTools.Resolution.MILLISECOND), false, true);
+    // filter that should discard matches
+    // DateFilter df2 = DateFilter.Before("datefield", now - 999999);
+    TermRangeFilter df2 = new TermRangeFilter("datefield", DateTools
+        .timeToString(0, DateTools.Resolution.MILLISECOND), DateTools
+        .timeToString(now - 2000, DateTools.Resolution.MILLISECOND), true,
+        false);
+    
+    // search something that doesn't exist with DateFilter
+    Query query1 = new TermQuery(new Term("body", "NoMatchForThis"));
+    
+    // search for something that does exist
+    Query query2 = new TermQuery(new Term("body", "sunny"));
+    
+    ScoreDoc[] result;
+    
+    // ensure that queries return expected results without DateFilter first
+    result = searcher.search(query1, null, 1000).scoreDocs;
+    assertEquals(0, result.length);
+    
+    result = searcher.search(query2, null, 1000).scoreDocs;
+    assertEquals(1, result.length);
+    
+    // run queries with DateFilter
+    result = searcher.search(query1, df1, 1000).scoreDocs;
+    assertEquals(0, result.length);
+    
+    result = searcher.search(query1, df2, 1000).scoreDocs;
+    assertEquals(0, result.length);
+    
+    result = searcher.search(query2, df1, 1000).scoreDocs;
+    assertEquals(1, result.length);
+    
+    result = searcher.search(query2, df2, 1000).scoreDocs;
+    assertEquals(0, result.length);
+    searcher.close();
+    reader.close();
+    indexStore.close();
+  }
+  
+  /**
+   *
+   */
+  public void testAfter() throws IOException {
+    // create an index
+    Directory indexStore = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random, indexStore);
+    
+    long now = System.currentTimeMillis();
+    
+    Document doc = new Document();
+    // add time that is in the future
+    doc.add(newField("datefield", DateTools.timeToString(now + 888888,
+        DateTools.Resolution.MILLISECOND), Field.Store.YES,
+        Field.Index.NOT_ANALYZED));
+    doc.add(newField("body", "Today is a very sunny day in New York City",
+        Field.Store.YES, Field.Index.ANALYZED));
+    writer.addDocument(doc);
+    
+    IndexReader reader = writer.getReader();
+    writer.close();
+    IndexSearcher searcher = newSearcher(reader);
+    
+    // filter that should preserve matches
+    // DateFilter df1 = DateFilter.After("datefield", now);
+    TermRangeFilter df1 = new TermRangeFilter("datefield", DateTools
+        .timeToString(now, DateTools.Resolution.MILLISECOND), DateTools
+        .timeToString(now + 999999, DateTools.Resolution.MILLISECOND), true,
+        false);
+    // filter that should discard matches
+    // DateFilter df2 = DateFilter.After("datefield", now + 999999);
+    TermRangeFilter df2 = new TermRangeFilter("datefield", DateTools
+        .timeToString(now + 999999, DateTools.Resolution.MILLISECOND),
+        DateTools.timeToString(now + 999999999,
+            DateTools.Resolution.MILLISECOND), false, true);
+    
+    // search something that doesn't exist with DateFilter
+    Query query1 = new TermQuery(new Term("body", "NoMatchForThis"));
+    
+    // search for something that does exist
+    Query query2 = new TermQuery(new Term("body", "sunny"));
+    
+    ScoreDoc[] result;
+    
+    // ensure that queries return expected results without DateFilter first
+    result = searcher.search(query1, null, 1000).scoreDocs;
+    assertEquals(0, result.length);
+    
+    result = searcher.search(query2, null, 1000).scoreDocs;
+    assertEquals(1, result.length);
+    
+    // run queries with DateFilter
+    result = searcher.search(query1, df1, 1000).scoreDocs;
+    assertEquals(0, result.length);
+    
+    result = searcher.search(query1, df2, 1000).scoreDocs;
+    assertEquals(0, result.length);
+    
+    result = searcher.search(query2, df1, 1000).scoreDocs;
+    assertEquals(1, result.length);
+    
+    result = searcher.search(query2, df2, 1000).scoreDocs;
+    assertEquals(0, result.length);
+    searcher.close();
+    reader.close();
+    indexStore.close();
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/TestDateSort.java b/lucene/backwards/src/test/org/apache/lucene/search/TestDateSort.java
new file mode 100644
index 0000000..039b388
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/TestDateSort.java
@@ -0,0 +1,125 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Arrays;
+
+import org.apache.lucene.util.LuceneTestCase;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.WhitespaceAnalyzer;
+import org.apache.lucene.document.DateTools;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.queryParser.QueryParser;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Sort;
+import org.apache.lucene.search.SortField;
+import org.apache.lucene.store.Directory;
+
+/**
+ * Test date sorting, i.e. auto-sorting of fields with type "long".
+ * See http://issues.apache.org/jira/browse/LUCENE-1045 
+ */
+public class TestDateSort extends LuceneTestCase {
+
+  private static final String TEXT_FIELD = "text";
+  private static final String DATE_TIME_FIELD = "dateTime";
+
+  private Directory directory;
+  private IndexReader reader;
+
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    // Create an index writer.
+    directory = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random, directory, new MockAnalyzer(random));
+
+    // oldest doc:
+    // Add the first document.  text = "Document 1"  dateTime = Oct 10 03:25:22 EDT 2007
+    writer.addDocument(createDocument("Document 1", 1192001122000L));
+    // Add the second document.  text = "Document 2"  dateTime = Oct 10 03:25:26 EDT 2007 
+    writer.addDocument(createDocument("Document 2", 1192001126000L));
+    // Add the third document.  text = "Document 3"  dateTime = Oct 11 07:12:13 EDT 2007 
+    writer.addDocument(createDocument("Document 3", 1192101133000L));
+    // Add the fourth document.  text = "Document 4"  dateTime = Oct 11 08:02:09 EDT 2007
+    writer.addDocument(createDocument("Document 4", 1192104129000L));
+    // latest doc:
+    // Add the fifth document.  text = "Document 5"  dateTime = Oct 12 13:25:43 EDT 2007
+    writer.addDocument(createDocument("Document 5", 1192209943000L));
+
+    reader = writer.getReader();
+    writer.close();
+  }
+
+  @Override
+  public void tearDown() throws Exception {
+    reader.close();
+    directory.close();
+    super.tearDown();
+  }
+
+  public void testReverseDateSort() throws Exception {
+    IndexSearcher searcher = newSearcher(reader);
+
+    Sort sort = new Sort(new SortField(DATE_TIME_FIELD, SortField.STRING, true));
+
+    QueryParser queryParser = new QueryParser(TEST_VERSION_CURRENT, TEXT_FIELD, new MockAnalyzer(random));
+    Query query = queryParser.parse("Document");
+
+    // Execute the search and process the search results.
+    String[] actualOrder = new String[5];
+    ScoreDoc[] hits = searcher.search(query, null, 1000, sort).scoreDocs;
+    for (int i = 0; i < hits.length; i++) {
+      Document document = searcher.doc(hits[i].doc);
+      String text = document.get(TEXT_FIELD);
+      actualOrder[i] = text;
+    }
+    searcher.close();
+
+    // Set up the expected order (i.e. Document 5, 4, 3, 2, 1).
+    String[] expectedOrder = new String[5];
+    expectedOrder[0] = "Document 5";
+    expectedOrder[1] = "Document 4";
+    expectedOrder[2] = "Document 3";
+    expectedOrder[3] = "Document 2";
+    expectedOrder[4] = "Document 1";
+
+    assertEquals(Arrays.asList(expectedOrder), Arrays.asList(actualOrder));
+  }
+
+  private Document createDocument(String text, long time) {
+    Document document = new Document();
+
+    // Add the text field.
+    Field textField = newField(TEXT_FIELD, text, Field.Store.YES, Field.Index.ANALYZED);
+    document.add(textField);
+
+    // Add the date/time field.
+    String dateTimeString = DateTools.timeToString(time, DateTools.Resolution.SECOND);
+    Field dateTimeField = newField(DATE_TIME_FIELD, dateTimeString, Field.Store.YES,
+        Field.Index.NOT_ANALYZED);
+    document.add(dateTimeField);
+
+    return document;
+  }
+
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java b/lucene/backwards/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java
new file mode 100644
index 0000000..c7df002
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java
@@ -0,0 +1,497 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.WhitespaceAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.FieldInvertState;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.store.Directory;
+
+import java.text.DecimalFormat;
+import java.io.IOException;
+
+/**
+ * Test of the DisjunctionMaxQuery.
+ * 
+ */
+public class TestDisjunctionMaxQuery extends LuceneTestCase {
+  
+  /** threshold for comparing floats */
+  public static final float SCORE_COMP_THRESH = 0.0000f;
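+  // (a threshold of zero means compared scores must be exactly equal)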
+  
+  /**
+   * Similarity to eliminate tf, idf and lengthNorm effects to isolate test
+   * case.
+   * 
+   * <p>
+   * same as TestRankingSimilarity in TestRanking.zip from
+   * http://issues.apache.org/jira/browse/LUCENE-323
+   * </p>
+   */
+  private static class TestSimilarity extends DefaultSimilarity {
+    
+    public TestSimilarity() {}
+    
+    @Override
+    public float tf(float freq) {
+      if (freq > 0.0f) return 1.0f;
+      else return 0.0f;
+    }
+    
+    @Override
+    public float computeNorm(String fieldName, FieldInvertState state) {
+      // Disable length norm
+      return state.getBoost();
+    }
+    
+    @Override
+    public float idf(int docFreq, int numDocs) {
+      return 1.0f;
+    }
+  }
+  
+  public Similarity sim = new TestSimilarity();
+  public Directory index;
+  public IndexReader r;
+  public IndexSearcher s;
+  
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    
+    index = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random, index,
+        newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))
+                                                     .setSimilarity(sim).setMergePolicy(newLogMergePolicy()));
+    
+    // hed is the most important field, dek is secondary
+    
+    // d1 is an "ok" match for: albino elephant
+    {
+      Document d1 = new Document();
+      d1.add(newField("id", "d1", Field.Store.YES, Field.Index.NOT_ANALYZED));// Field.Keyword("id",
+                                                                               // "d1"));
+      d1
+          .add(newField("hed", "elephant", Field.Store.YES,
+              Field.Index.ANALYZED));// Field.Text("hed", "elephant"));
+      d1
+          .add(newField("dek", "elephant", Field.Store.YES,
+              Field.Index.ANALYZED));// Field.Text("dek", "elephant"));
+      writer.addDocument(d1);
+    }
+    
+    // d2 is a "good" match for: albino elephant
+    {
+      Document d2 = new Document();
+      d2.add(newField("id", "d2", Field.Store.YES, Field.Index.NOT_ANALYZED));// Field.Keyword("id",
+                                                                               // "d2"));
+      d2
+          .add(newField("hed", "elephant", Field.Store.YES,
+              Field.Index.ANALYZED));// Field.Text("hed", "elephant"));
+      d2.add(newField("dek", "albino", Field.Store.YES, Field.Index.ANALYZED));// Field.Text("dek",
+                                                                                // "albino"));
+      d2
+          .add(newField("dek", "elephant", Field.Store.YES,
+              Field.Index.ANALYZED));// Field.Text("dek", "elephant"));
+      writer.addDocument(d2);
+    }
+    
+    // d3 is a "better" match for: albino elephant
+    {
+      Document d3 = new Document();
+      d3.add(newField("id", "d3", Field.Store.YES, Field.Index.NOT_ANALYZED));// Field.Keyword("id",
+                                                                               // "d3"));
+      d3.add(newField("hed", "albino", Field.Store.YES, Field.Index.ANALYZED));// Field.Text("hed",
+                                                                                // "albino"));
+      d3
+          .add(newField("hed", "elephant", Field.Store.YES,
+              Field.Index.ANALYZED));// Field.Text("hed", "elephant"));
+      writer.addDocument(d3);
+    }
+    
+    // d4 is the "best" match for: albino elephant
+    {
+      Document d4 = new Document();
+      d4.add(newField("id", "d4", Field.Store.YES, Field.Index.NOT_ANALYZED));// Field.Keyword("id",
+                                                                               // "d4"));
+      d4.add(newField("hed", "albino", Field.Store.YES, Field.Index.ANALYZED));// Field.Text("hed",
+                                                                                // "albino"));
+      d4
+          .add(newField("hed", "elephant", Field.Store.YES,
+              Field.Index.ANALYZED));// Field.Text("hed", "elephant"));
+      d4.add(newField("dek", "albino", Field.Store.YES, Field.Index.ANALYZED));// Field.Text("dek",
+                                                                                // "albino"));
+      writer.addDocument(d4);
+    }
+
+    writer.optimize();
+    r = writer.getReader();
+    writer.close();
+    s = newSearcher(r);
+    s.setSimilarity(sim);
+  }
+  
+  @Override
+  public void tearDown() throws Exception {
+    s.close();
+    r.close();
+    index.close();
+    super.tearDown();
+  }
+  
+  public void testSkipToFirsttimeMiss() throws IOException {
+    final DisjunctionMaxQuery dq = new DisjunctionMaxQuery(0.0f);
+    dq.add(tq("id", "d1"));
+    dq.add(tq("dek", "DOES_NOT_EXIST"));
+    
+    QueryUtils.check(random, dq, s);
+    
+    final Weight dw = s.createNormalizedWeight(dq);
+    IndexReader sub = s.getIndexReader().getSequentialSubReaders() == null ?
+        s.getIndexReader() : s.getIndexReader().getSequentialSubReaders()[0];
+    final Scorer ds = dw.scorer(sub, true, false);
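+    // only d1 (doc 0) can match this query, so advancing to doc 3 should exhaust the scorer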
+    final boolean skipOk = ds.advance(3) != DocIdSetIterator.NO_MORE_DOCS;
+    if (skipOk) {
+      fail("firsttime skipTo found a match? ... "
+          + r.document(ds.docID()).get("id"));
+    }
+  }
+  
+  public void testSkipToFirsttimeHit() throws IOException {
+    final DisjunctionMaxQuery dq = new DisjunctionMaxQuery(0.0f);
+    dq.add(tq("dek", "albino"));
+    dq.add(tq("dek", "DOES_NOT_EXIST"));
+    
+    QueryUtils.check(random, dq, s);
+
+    final Weight dw = s.createNormalizedWeight(dq);
+    IndexReader sub = s.getIndexReader().getSequentialSubReaders() == null ?
+        s.getIndexReader() : s.getIndexReader().getSequentialSubReaders()[0];
+    final Scorer ds = dw.scorer(sub, true, false);
+    assertTrue("firsttime skipTo found no match",
+        ds.advance(3) != DocIdSetIterator.NO_MORE_DOCS);
+    assertEquals("found wrong docid", "d4", r.document(ds.docID()).get("id"));
+  }
+  
+  public void testSimpleEqualScores1() throws Exception {
+    
+    DisjunctionMaxQuery q = new DisjunctionMaxQuery(0.0f);
+    q.add(tq("hed", "albino"));
+    q.add(tq("hed", "elephant"));
+    QueryUtils.check(random, q, s);
+    
+    ScoreDoc[] h = s.search(q, null, 1000).scoreDocs;
+    
+    try {
+      assertEquals("all docs should match " + q.toString(), 4, h.length);
+      
+      float score = h[0].score;
+      for (int i = 1; i < h.length; i++) {
+        assertEquals("score #" + i + " is not the same", score, h[i].score,
+            SCORE_COMP_THRESH);
+      }
+    } catch (Error e) {
+      printHits("testSimpleEqualScores1", h, s);
+      throw e;
+    }
+    
+  }
+  
+  public void testSimpleEqualScores2() throws Exception {
+    
+    DisjunctionMaxQuery q = new DisjunctionMaxQuery(0.0f);
+    q.add(tq("dek", "albino"));
+    q.add(tq("dek", "elephant"));
+    QueryUtils.check(random, q, s);
+    
+    ScoreDoc[] h = s.search(q, null, 1000).scoreDocs;
+    
+    try {
+      assertEquals("3 docs should match " + q.toString(), 3, h.length);
+      float score = h[0].score;
+      for (int i = 1; i < h.length; i++) {
+        assertEquals("score #" + i + " is not the same", score, h[i].score,
+            SCORE_COMP_THRESH);
+      }
+    } catch (Error e) {
+      printHits("testSimpleEqualScores2", h, s);
+      throw e;
+    }
+    
+  }
+  
+  public void testSimpleEqualScores3() throws Exception {
+    
+    DisjunctionMaxQuery q = new DisjunctionMaxQuery(0.0f);
+    q.add(tq("hed", "albino"));
+    q.add(tq("hed", "elephant"));
+    q.add(tq("dek", "albino"));
+    q.add(tq("dek", "elephant"));
+    QueryUtils.check(random, q, s);
+    
+    ScoreDoc[] h = s.search(q, null, 1000).scoreDocs;
+    
+    try {
+      assertEquals("all docs should match " + q.toString(), 4, h.length);
+      float score = h[0].score;
+      for (int i = 1; i < h.length; i++) {
+        assertEquals("score #" + i + " is not the same", score, h[i].score,
+            SCORE_COMP_THRESH);
+      }
+    } catch (Error e) {
+      printHits("testSimpleEqualScores3", h, s);
+      throw e;
+    }
+    
+  }
+  
+  public void testSimpleTiebreaker() throws Exception {
+    
+    DisjunctionMaxQuery q = new DisjunctionMaxQuery(0.01f);
+    q.add(tq("dek", "albino"));
+    q.add(tq("dek", "elephant"));
+    QueryUtils.check(random, q, s);
+    
+    ScoreDoc[] h = s.search(q, null, 1000).scoreDocs;
+    
+    try {
+      assertEquals("3 docs should match " + q.toString(), 3, h.length);
+      assertEquals("wrong first", "d2", s.doc(h[0].doc).get("id"));
+      float score0 = h[0].score;
+      float score1 = h[1].score;
+      float score2 = h[2].score;
+      assertTrue("d2 does not have better score then others: " + score0
+          + " >? " + score1, score0 > score1);
+      assertEquals("d4 and d1 don't have equal scores", score1, score2,
+          SCORE_COMP_THRESH);
+    } catch (Error e) {
+      printHits("testSimpleTiebreaker", h, s);
+      throw e;
+    }
+  }
+  
+  public void testBooleanRequiredEqualScores() throws Exception {
+    
+    BooleanQuery q = new BooleanQuery();
+    {
+      DisjunctionMaxQuery q1 = new DisjunctionMaxQuery(0.0f);
+      q1.add(tq("hed", "albino"));
+      q1.add(tq("dek", "albino"));
+      q.add(q1, BooleanClause.Occur.MUST);// true,false);
+      QueryUtils.check(random, q1, s);
+      
+    }
+    {
+      DisjunctionMaxQuery q2 = new DisjunctionMaxQuery(0.0f);
+      q2.add(tq("hed", "elephant"));
+      q2.add(tq("dek", "elephant"));
+      q.add(q2, BooleanClause.Occur.MUST);// true,false);
+      QueryUtils.check(random, q2, s);
+    }
+    
+    QueryUtils.check(random, q, s);
+    
+    ScoreDoc[] h = s.search(q, null, 1000).scoreDocs;
+    
+    try {
+      assertEquals("3 docs should match " + q.toString(), 3, h.length);
+      float score = h[0].score;
+      for (int i = 1; i < h.length; i++) {
+        assertEquals("score #" + i + " is not the same", score, h[i].score,
+            SCORE_COMP_THRESH);
+      }
+    } catch (Error e) {
+      printHits("testBooleanRequiredEqualScores1", h, s);
+      throw e;
+    }
+  }
+  
+  public void testBooleanOptionalNoTiebreaker() throws Exception {
+    
+    BooleanQuery q = new BooleanQuery();
+    {
+      DisjunctionMaxQuery q1 = new DisjunctionMaxQuery(0.0f);
+      q1.add(tq("hed", "albino"));
+      q1.add(tq("dek", "albino"));
+      q.add(q1, BooleanClause.Occur.SHOULD);// false,false);
+    }
+    {
+      DisjunctionMaxQuery q2 = new DisjunctionMaxQuery(0.0f);
+      q2.add(tq("hed", "elephant"));
+      q2.add(tq("dek", "elephant"));
+      q.add(q2, BooleanClause.Occur.SHOULD);// false,false);
+    }
+    QueryUtils.check(random, q, s);
+    
+    ScoreDoc[] h = s.search(q, null, 1000).scoreDocs;
+    
+    try {
+      assertEquals("4 docs should match " + q.toString(), 4, h.length);
+      float score = h[0].score;
+      for (int i = 1; i < h.length - 1; i++) { /* note: -1 */
+        assertEquals("score #" + i + " is not the same", score, h[i].score,
+            SCORE_COMP_THRESH);
+      }
+      assertEquals("wrong last", "d1", s.doc(h[h.length - 1].doc).get("id"));
+      float score1 = h[h.length - 1].score;
+      assertTrue("d1 does not have worse score then others: " + score + " >? "
+          + score1, score > score1);
+    } catch (Error e) {
+      printHits("testBooleanOptionalNoTiebreaker", h, s);
+      throw e;
+    }
+  }
+  
+  public void testBooleanOptionalWithTiebreaker() throws Exception {
+    
+    BooleanQuery q = new BooleanQuery();
+    {
+      DisjunctionMaxQuery q1 = new DisjunctionMaxQuery(0.01f);
+      q1.add(tq("hed", "albino"));
+      q1.add(tq("dek", "albino"));
+      q.add(q1, BooleanClause.Occur.SHOULD);// false,false);
+    }
+    {
+      DisjunctionMaxQuery q2 = new DisjunctionMaxQuery(0.01f);
+      q2.add(tq("hed", "elephant"));
+      q2.add(tq("dek", "elephant"));
+      q.add(q2, BooleanClause.Occur.SHOULD);// false,false);
+    }
+    QueryUtils.check(random, q, s);
+    
+    ScoreDoc[] h = s.search(q, null, 1000).scoreDocs;
+    
+    try {
+      
+      assertEquals("4 docs should match " + q.toString(), 4, h.length);
+      
+      float score0 = h[0].score;
+      float score1 = h[1].score;
+      float score2 = h[2].score;
+      float score3 = h[3].score;
+      
+      String doc0 = s.doc(h[0].doc).get("id");
+      String doc1 = s.doc(h[1].doc).get("id");
+      String doc2 = s.doc(h[2].doc).get("id");
+      String doc3 = s.doc(h[3].doc).get("id");
+      
+      assertTrue("doc0 should be d2 or d4: " + doc0, doc0.equals("d2")
+          || doc0.equals("d4"));
+      assertTrue("doc1 should be d2 or d4: " + doc0, doc1.equals("d2")
+          || doc1.equals("d4"));
+      assertEquals("score0 and score1 should match", score0, score1,
+          SCORE_COMP_THRESH);
+      assertEquals("wrong third", "d3", doc2);
+      assertTrue("d3 does not have worse score then d2 and d4: " + score1
+          + " >? " + score2, score1 > score2);
+      
+      assertEquals("wrong fourth", "d1", doc3);
+      assertTrue("d1 does not have worse score then d3: " + score2 + " >? "
+          + score3, score2 > score3);
+      
+    } catch (Error e) {
+      printHits("testBooleanOptionalWithTiebreaker", h, s);
+      throw e;
+    }
+    
+  }
+  
+  public void testBooleanOptionalWithTiebreakerAndBoost() throws Exception {
+    
+    BooleanQuery q = new BooleanQuery();
+    {
+      DisjunctionMaxQuery q1 = new DisjunctionMaxQuery(0.01f);
+      q1.add(tq("hed", "albino", 1.5f));
+      q1.add(tq("dek", "albino"));
+      q.add(q1, BooleanClause.Occur.SHOULD);// false,false);
+    }
+    {
+      DisjunctionMaxQuery q2 = new DisjunctionMaxQuery(0.01f);
+      q2.add(tq("hed", "elephant", 1.5f));
+      q2.add(tq("dek", "elephant"));
+      q.add(q2, BooleanClause.Occur.SHOULD);// false,false);
+    }
+    QueryUtils.check(random, q, s);
+    
+    ScoreDoc[] h = s.search(q, null, 1000).scoreDocs;
+    
+    try {
+      
+      assertEquals("4 docs should match " + q.toString(), 4, h.length);
+      
+      float score0 = h[0].score;
+      float score1 = h[1].score;
+      float score2 = h[2].score;
+      float score3 = h[3].score;
+      
+      String doc0 = s.doc(h[0].doc).get("id");
+      String doc1 = s.doc(h[1].doc).get("id");
+      String doc2 = s.doc(h[2].doc).get("id");
+      String doc3 = s.doc(h[3].doc).get("id");
+      
+      assertEquals("doc0 should be d4: ", "d4", doc0);
+      assertEquals("doc1 should be d3: ", "d3", doc1);
+      assertEquals("doc2 should be d2: ", "d2", doc2);
+      assertEquals("doc3 should be d1: ", "d1", doc3);
+      
+      assertTrue("d4 does not have a better score then d3: " + score0 + " >? "
+          + score1, score0 > score1);
+      assertTrue("d3 does not have a better score then d2: " + score1 + " >? "
+          + score2, score1 > score2);
+      assertTrue("d3 does not have a better score then d1: " + score2 + " >? "
+          + score3, score2 > score3);
+      
+    } catch (Error e) {
+      printHits("testBooleanOptionalWithTiebreakerAndBoost", h, s);
+      throw e;
+    }
+  }
+  
+  /** macro */
+  protected Query tq(String f, String t) {
+    return new TermQuery(new Term(f, t));
+  }
+  
+  /** macro */
+  protected Query tq(String f, String t, float b) {
+    Query q = tq(f, t);
+    q.setBoost(b);
+    return q;
+  }
+  
+  protected void printHits(String test, ScoreDoc[] h, Searcher searcher)
+      throws Exception {
+    
+    System.err.println("------- " + test + " -------");
+    
+    DecimalFormat f = new DecimalFormat("0.000000000");
+    
+    for (int i = 0; i < h.length; i++) {
+      Document d = searcher.doc(h[i].doc);
+      float score = h[i].score;
+      System.err
+          .println("#" + i + ": " + f.format(score) + " - " + d.get("id"));
+    }
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/TestDocBoost.java b/lucene/backwards/src/test/org/apache/lucene/search/TestDocBoost.java
new file mode 100644
index 0000000..9878227
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/TestDocBoost.java
@@ -0,0 +1,100 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.*;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+
+/** Document boost unit test.
+ *
+ *
+ * @version $Revision$
+ */
+public class TestDocBoost extends LuceneTestCase {
+
+  public void testDocBoost() throws Exception {
+    Directory store = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random, store, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
+
+    Fieldable f1 = newField("field", "word", Field.Store.YES, Field.Index.ANALYZED);
+    Fieldable f2 = newField("field", "word", Field.Store.YES, Field.Index.ANALYZED);
+    f2.setBoost(2.0f);
+
+    Document d1 = new Document();
+    Document d2 = new Document();
+    Document d3 = new Document();
+    Document d4 = new Document();
+    d3.setBoost(3.0f);
+    d4.setBoost(2.0f);
+
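+    // the effective boost is the field boost times the document boost, i.e. 1, 2, 3 and 4 for d1..d4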
+    d1.add(f1);                                 // boost = 1
+    d2.add(f2);                                 // boost = 2
+    d3.add(f1);                                 // boost = 3
+    d4.add(f2);                                 // boost = 4
+
+    writer.addDocument(d1);
+    writer.addDocument(d2);
+    writer.addDocument(d3);
+    writer.addDocument(d4);
+
+    IndexReader reader = writer.getReader();
+    writer.close();
+
+    final float[] scores = new float[4];
+
+    newSearcher(reader).search
+      (new TermQuery(new Term("field", "word")),
+       new Collector() {
+         private int base = 0;
+         private Scorer scorer;
+         @Override
+         public void setScorer(Scorer scorer) throws IOException {
+          this.scorer = scorer;
+         }
+         @Override
+         public final void collect(int doc) throws IOException {
+           scores[doc + base] = scorer.score();
+         }
+         @Override
+         public void setNextReader(IndexReader reader, int docBase) {
+           base = docBase;
+         }
+         @Override
+         public boolean acceptsDocsOutOfOrder() {
+           return true;
+         }
+       });
+
+    float lastScore = 0.0f;
+
+    for (int i = 0; i < 4; i++) {
+      assertTrue(scores[i] > lastScore);
+      lastScore = scores[i];
+    }
+    
+    reader.close();
+    store.close();
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/TestDocIdSet.java b/lucene/backwards/src/test/org/apache/lucene/search/TestDocIdSet.java
new file mode 100644
index 0000000..0cdf640
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/TestDocIdSet.java
@@ -0,0 +1,128 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Iterator;
+
+import junit.framework.Assert;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field.Index;
+import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+
+public class TestDocIdSet extends LuceneTestCase {
+  public void testFilteredDocIdSet() throws Exception {
+    final int maxdoc=10;
+    final DocIdSet innerSet = new DocIdSet() {
+
+        @Override
+        public DocIdSetIterator iterator() {
+          return new DocIdSetIterator() {
+
+            int docid = -1;
+            
+            @Override
+            public int docID() {
+              return docid;
+            }
+            
+            @Override
+            public int nextDoc() throws IOException {
+              docid++;
+              return docid < maxdoc ? docid : (docid = NO_MORE_DOCS);
+            }
+
+            @Override
+            public int advance(int target) throws IOException {
+              while (nextDoc() < target) {}
+              return docid;
+            }
+          };
+        } 
+      };
+	  
+		
+    DocIdSet filteredSet = new FilteredDocIdSet(innerSet){
+        @Override
+        protected boolean match(int docid) {
+          return docid % 2 == 0;  // validate only even docids
+        }
+      };
+
+    DocIdSetIterator iter = filteredSet.iterator();
+    ArrayList<Integer> list = new ArrayList<Integer>();
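+    // advance to the first accepted docid >= 3; only even docids pass the filter, so the first hit is 4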
+    int doc = iter.advance(3);
+    if (doc != DocIdSetIterator.NO_MORE_DOCS) {
+      list.add(Integer.valueOf(doc));
+      while((doc = iter.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
+        list.add(Integer.valueOf(doc));
+      }
+    }
+
+    int[] docs = new int[list.size()];
+    int c=0;
+    Iterator<Integer> intIter = list.iterator();
+    while(intIter.hasNext()) {
+      docs[c++] = intIter.next().intValue();
+    }
+    int[] answer = new int[]{4,6,8};
+    boolean same = Arrays.equals(answer, docs);
+    if (!same) {
+      System.out.println("answer: " + Arrays.toString(answer));
+      System.out.println("gotten: " + Arrays.toString(docs));
+      fail();
+    }
+  }
+  
+  public void testNullDocIdSet() throws Exception {
+    // Tests that if a Filter produces a null DocIdSet and it is passed to
+    // IndexSearcher, everything still works fine. This came up in LUCENE-1754.
+    Directory dir = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random, dir);
+    Document doc = new Document();
+    doc.add(newField("c", "val", Store.NO, Index.NOT_ANALYZED_NO_NORMS));
+    writer.addDocument(doc);
+    IndexReader reader = writer.getReader();
+    writer.close();
+    
+    // First verify the document is searchable.
+    IndexSearcher searcher = newSearcher(reader);
+    Assert.assertEquals(1, searcher.search(new MatchAllDocsQuery(), 10).totalHits);
+    
+    // Now search w/ a Filter which returns a null DocIdSet
+    Filter f = new Filter() {
+      @Override
+      public DocIdSet getDocIdSet(IndexReader reader) throws IOException {
+        return null;
+      }
+    };
+    
+    Assert.assertEquals(0, searcher.search(new MatchAllDocsQuery(), f, 10).totalHits);
+    searcher.close();
+    reader.close();
+    dir.close();
+  }
+
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/TestElevationComparator.java b/lucene/backwards/src/test/org/apache/lucene/search/TestElevationComparator.java
new file mode 100644
index 0000000..506eec6
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/TestElevationComparator.java
@@ -0,0 +1,183 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.*;
+import org.apache.lucene.search.FieldValueHitQueue.Entry;
+import org.apache.lucene.store.*;
+import org.apache.lucene.util.LuceneTestCase;
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+public class TestElevationComparator extends LuceneTestCase {
+
+  private final Map<String,Integer> priority = new HashMap<String,Integer>();
+
+  //@Test
+  public void testSorting() throws Throwable {
+    Directory directory = newDirectory();
+    IndexWriter writer = new IndexWriter(
+        directory,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+            setMaxBufferedDocs(2).
+            setMergePolicy(newLogMergePolicy(1000))
+    );
+    writer.addDocument(adoc(new String[] {"id", "a", "title", "ipod", "str_s", "a"}));
+    writer.addDocument(adoc(new String[] {"id", "b", "title", "ipod ipod", "str_s", "b"}));
+    writer.addDocument(adoc(new String[] {"id", "c", "title", "ipod ipod ipod", "str_s","c"}));
+    writer.addDocument(adoc(new String[] {"id", "x", "title", "boosted", "str_s", "x"}));
+    writer.addDocument(adoc(new String[] {"id", "y", "title", "boosted boosted", "str_s","y"}));
+    writer.addDocument(adoc(new String[] {"id", "z", "title", "boosted boosted boosted","str_s", "z"}));
+
+    IndexReader r = IndexReader.open(writer, true);
+    writer.close();
+
+    IndexSearcher searcher = newSearcher(r);
+
+    runTest(searcher, true);
+    runTest(searcher, false);
+
+    searcher.close();
+    r.close();
+    directory.close();
+  }
+
+  private void runTest(IndexSearcher searcher, boolean reversed) throws Throwable {
+
+    BooleanQuery newq = new BooleanQuery(false);
+    TermQuery query = new TermQuery(new Term("title", "ipod"));
+
+    newq.add(query, BooleanClause.Occur.SHOULD);
+    newq.add(getElevatedQuery(new String[] {"id", "a", "id", "x"}), BooleanClause.Occur.SHOULD);
+
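+    // sort primarily by elevation priority, secondarily by score (optionally reversed)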
+    Sort sort = new Sort(
+        new SortField("id", new ElevationComparatorSource(priority), false),
+        new SortField(null, SortField.SCORE, reversed)
+      );
+
+    TopDocsCollector<Entry> topCollector = TopFieldCollector.create(sort, 50, false, true, true, true);
+    searcher.search(newq, null, topCollector);
+
+    TopDocs topDocs = topCollector.topDocs(0, 10);
+    int nDocsReturned = topDocs.scoreDocs.length;
+
+    assertEquals(4, nDocsReturned);
+
+    // 0 & 3 were elevated
+    assertEquals(0, topDocs.scoreDocs[0].doc);
+    assertEquals(3, topDocs.scoreDocs[1].doc);
+
+    if (reversed) {
+      assertEquals(2, topDocs.scoreDocs[2].doc);
+      assertEquals(1, topDocs.scoreDocs[3].doc);
+    } else {
+      assertEquals(1, topDocs.scoreDocs[2].doc);
+      assertEquals(2, topDocs.scoreDocs[3].doc);
+    }
+
+    /*
+    for (int i = 0; i < nDocsReturned; i++) {
+     ScoreDoc scoreDoc = topDocs.scoreDocs[i];
+     ids[i] = scoreDoc.doc;
+     scores[i] = scoreDoc.score;
+     documents[i] = searcher.doc(ids[i]);
+     System.out.println("ids[i] = " + ids[i]);
+     System.out.println("documents[i] = " + documents[i]);
+     System.out.println("scores[i] = " + scores[i]);
+   }
+    */
+ }
+
+ private Query getElevatedQuery(String[] vals) {
+   BooleanQuery q = new BooleanQuery(false);
+   q.setBoost(0);
+   int max = (vals.length / 2) + 5;
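+   // earlier ids in the array get higher priority values; max counts down as pairs are added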
+   for (int i = 0; i < vals.length - 1; i += 2) {
+     q.add(new TermQuery(new Term(vals[i], vals[i + 1])), BooleanClause.Occur.SHOULD);
+     priority.put(vals[i + 1], Integer.valueOf(max--));
+     // System.out.println(" pri doc=" + vals[i+1] + " pri=" + (1+max));
+   }
+   return q;
+ }
+
+ private Document adoc(String[] vals) {
+   Document doc = new Document();
+   for (int i = 0; i < vals.length - 2; i += 2) {
+     doc.add(newField(vals[i], vals[i + 1], Field.Store.YES, Field.Index.ANALYZED));
+   }
+   return doc;
+ }
+}
+
+class ElevationComparatorSource extends FieldComparatorSource {
+  private final Map<String,Integer> priority;
+
+  public ElevationComparatorSource(final Map<String,Integer> boosts) {
+   this.priority = boosts;
+  }
+
+  @Override
+  public FieldComparator newComparator(final String fieldname, final int numHits, int sortPos, boolean reversed) throws IOException {
+   return new FieldComparator<Integer>() {
+
+     FieldCache.StringIndex idIndex;
+     private final int[] values = new int[numHits];
+     int bottomVal;
+
+     @Override
+     public int compare(int slot1, int slot2) {
+       return values[slot2] - values[slot1];  // values will be small enough that there is no overflow concern
+     }
+
+     @Override
+     public void setBottom(int slot) {
+       bottomVal = values[slot];
+     }
+
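+     // look up the elevation priority for this doc's id; non-elevated docs get 0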
+     private int docVal(int doc) throws IOException {
+       String id = idIndex.lookup[idIndex.order[doc]];
+       Integer prio = priority.get(id);
+       return prio == null ? 0 : prio.intValue();
+     }
+
+     @Override
+     public int compareBottom(int doc) throws IOException {
+       return docVal(doc) - bottomVal;
+     }
+
+     @Override
+     public void copy(int slot, int doc) throws IOException {
+       values[slot] = docVal(doc);
+     }
+
+     @Override
+     public void setNextReader(IndexReader reader, int docBase) throws IOException {
+       idIndex = FieldCache.DEFAULT.getStringIndex(reader, fieldname);
+     }
+
+     @Override
+     public Integer value(int slot) {
+       return Integer.valueOf(values[slot]);
+     }
+   };
+ }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/TestExplanations.java b/lucene/backwards/src/test/org/apache/lucene/search/TestExplanations.java
new file mode 100644
index 0000000..94bbb69
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/TestExplanations.java
@@ -0,0 +1,256 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.queryParser.QueryParser;
+import org.apache.lucene.queryParser.ParseException;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.spans.SpanFirstQuery;
+import org.apache.lucene.search.spans.SpanNearQuery;
+import org.apache.lucene.search.spans.SpanNotQuery;
+import org.apache.lucene.search.spans.SpanOrQuery;
+import org.apache.lucene.search.spans.SpanQuery;
+import org.apache.lucene.search.spans.SpanTermQuery;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+
+/**
+ * Tests primitive queries (i.e. those that rewrite to themselves) to
+ * ensure they match the expected set of docs, and that the score of each
+ * match is equal to the value of the score's explanation.
+ *
+ * <p>
+ * The assumption is that if all of the "primitive" queries work well,
+ * then anything that rewrites to a primitive will work well also.
+ * </p>
+ *
+ * @see "Subclasses for actual tests"
+ */
+public class TestExplanations extends LuceneTestCase {
+  protected IndexSearcher searcher;
+  protected IndexReader reader;
+  protected Directory directory;
+  
+  public static final String KEY = "KEY";
+  // the boost on this field equals the doc's index (set in setUp)
+  public static final String FIELD = "field";
+  // same contents, but no field boost
+  public static final String ALTFIELD = "alt";
+  public static final QueryParser qp =
+    new QueryParser(TEST_VERSION_CURRENT, FIELD, new MockAnalyzer(random));
+
+  @Override
+  public void tearDown() throws Exception {
+    searcher.close();
+    reader.close();
+    directory.close();
+    super.tearDown();
+  }
+  
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    directory = newDirectory();
+    RandomIndexWriter writer= new RandomIndexWriter(random, directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
+    for (int i = 0; i < docFields.length; i++) {
+      Document doc = new Document();
+      doc.add(newField(KEY, ""+i, Field.Store.NO, Field.Index.NOT_ANALYZED));
+      Field f = newField(FIELD, docFields[i], Field.Store.NO, Field.Index.ANALYZED);
+      f.setBoost(i);
+      doc.add(f);
+      doc.add(newField(ALTFIELD, docFields[i], Field.Store.NO, Field.Index.ANALYZED));
+      writer.addDocument(doc);
+    }
+    reader = writer.getReader();
+    writer.close();
+    searcher = newSearcher(reader);
+  }
+
+  protected String[] docFields = {
+    "w1 w2 w3 w4 w5",
+    "w1 w3 w2 w3 zz",
+    "w1 xx w2 yy w3",
+    "w1 w3 xx w2 yy w3 zz"
+  };
+
+  public Query makeQuery(String queryText) throws ParseException {
+    return qp.parse(queryText);
+  }
+
+  /** check the expDocNrs first, then check the query (and the explanations) */
+  public void qtest(String queryText, int[] expDocNrs) throws Exception {
+    qtest(makeQuery(queryText), expDocNrs);
+  }
+  
+  /** check the expDocNrs first, then check the query (and the explanations) */
+  public void qtest(Query q, int[] expDocNrs) throws Exception {
+    CheckHits.checkHitCollector(random, q, FIELD, searcher, expDocNrs);
+  }
+
+  /**
+   * Tests a query using qtest after wrapping it with both optB and reqB
+   * @see #qtest
+   * @see #reqB
+   * @see #optB
+   */
+  public void bqtest(Query q, int[] expDocNrs) throws Exception {
+    qtest(reqB(q), expDocNrs);
+    qtest(optB(q), expDocNrs);
+  }
+  /**
+   * Tests a query using qtest after wrapping it with both optB and reqB
+   * @see #qtest
+   * @see #reqB
+   * @see #optB
+   */
+  public void bqtest(String queryText, int[] expDocNrs) throws Exception {
+    bqtest(makeQuery(queryText), expDocNrs);
+  }
+  
+  /** 
+   * Convenience subclass of FieldCacheTermsFilter
+   */
+  public static class ItemizedFilter extends FieldCacheTermsFilter {
+    private static String[] int2str(int [] terms) {
+      String [] out = new String[terms.length];
+      for (int i = 0; i < terms.length; i++) {
+        out[i] = ""+terms[i];
+      }
+      return out;
+    }
+    public ItemizedFilter(String keyField, int [] keys) {
+      super(keyField, int2str(keys));
+    }
+    public ItemizedFilter(int [] keys) {
+      super(KEY, int2str(keys));
+    }
+  }
+
+  /** helper for generating MultiPhraseQueries */
+  public static Term[] ta(String[] s) {
+    Term[] t = new Term[s.length];
+    for (int i = 0; i < s.length; i++) {
+      t[i] = new Term(FIELD, s[i]);
+    }
+    return t;
+  }
+
+  /** MACRO for SpanTermQuery */
+  public SpanTermQuery st(String s) {
+    return new SpanTermQuery(new Term(FIELD,s));
+  }
+  
+  /** MACRO for SpanNotQuery */
+  public SpanNotQuery snot(SpanQuery i, SpanQuery e) {
+    return new SpanNotQuery(i,e);
+  }
+
+  /** MACRO for SpanOrQuery containing two SpanTerm queries */
+  public SpanOrQuery sor(String s, String e) {
+    return sor(st(s), st(e));
+  }
+  /** MACRO for SpanOrQuery containing two SpanQueries */
+  public SpanOrQuery sor(SpanQuery s, SpanQuery e) {
+    return new SpanOrQuery(new SpanQuery[] { s, e });
+  }
+  
+  /** MACRO for SpanOrQuery containing three SpanTerm queries */
+  public SpanOrQuery sor(String s, String m, String e) {
+    return sor(st(s), st(m), st(e));
+  }
+  /** MACRO for SpanOrQuery containing three SpanQueries */
+  public SpanOrQuery sor(SpanQuery s, SpanQuery m, SpanQuery e) {
+    return new SpanOrQuery(new SpanQuery[] { s, m, e });
+  }
+  
+  /** MACRO for SpanNearQuery containing two SpanTerm queries */
+  public SpanNearQuery snear(String s, String e, int slop, boolean inOrder) {
+    return snear(st(s), st(e), slop, inOrder);
+  }
+  /** MACRO for SpanNearQuery containing two SpanQueries */
+  public SpanNearQuery snear(SpanQuery s, SpanQuery e,
+                             int slop, boolean inOrder) {
+    return new SpanNearQuery(new SpanQuery[] { s, e }, slop, inOrder);
+  }
+  
+  
+  /** MACRO for SpanNearQuery containing three SpanTerm queries */
+  public SpanNearQuery snear(String s, String m, String e,
+                             int slop, boolean inOrder) {
+    return snear(st(s), st(m), st(e), slop, inOrder);
+  }
+  /** MACRO for SpanNearQuery containing three SpanQueries */
+  public SpanNearQuery snear(SpanQuery s, SpanQuery m, SpanQuery e,
+                             int slop, boolean inOrder) {
+    return new SpanNearQuery(new SpanQuery[] { s, m, e }, slop, inOrder);
+  }
+  
+  /** MACRO for SpanFirst(SpanTermQuery) */
+  public SpanFirstQuery sf(String s, int b) {
+    return new SpanFirstQuery(st(s), b);
+  }
+
+  /**
+   * MACRO: Wraps a Query in a BooleanQuery so that it is optional, along
+   * with a second prohibited clause which will never match anything
+   */
+  public Query optB(String q) throws Exception {
+    return optB(makeQuery(q));
+  }
+  /**
+   * MACRO: Wraps a Query in a BooleanQuery so that it is optional, along
+   * with a second prohibited clause which will never match anything
+   */
+  public Query optB(Query q) throws Exception {
+    BooleanQuery bq = new BooleanQuery(true);
+    bq.add(q, BooleanClause.Occur.SHOULD);
+    bq.add(new TermQuery(new Term("NEVER","MATCH")), BooleanClause.Occur.MUST_NOT);
+    return bq;
+  }
+  
+  /**
+   * MACRO: Wraps a Query in a BooleanQuery so that it is required, along
+   * with a second optional clause which will match everything
+   */
+  public Query reqB(String q) throws Exception {
+    return reqB(makeQuery(q));
+  }
+  /**
+   * MACRO: Wraps a Query in a BooleanQuery so that it is required, along
+   * with a second optional clause which will match everything
+   */
+  public Query reqB(Query q) throws Exception {
+    BooleanQuery bq = new BooleanQuery(true);
+    bq.add(q, BooleanClause.Occur.MUST);
+    bq.add(new TermQuery(new Term(FIELD,"w1")), BooleanClause.Occur.SHOULD);
+    return bq;
+  }
+  
+  /**
+   * Placeholder: JUnit complains if a test class has no test methods, and
+   * making the class abstract doesn't help
+   */
+  public void testNoop() {
+    /* NOOP */
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/TestFieldCache.java b/lucene/backwards/src/test/org/apache/lucene/search/TestFieldCache.java
new file mode 100644
index 0000000..b4028d2
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/TestFieldCache.java
@@ -0,0 +1,137 @@
+package org.apache.lucene.search;
+
+/**
+ * Copyright 2004 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+import java.io.IOException;
+import java.io.ByteArrayOutputStream;
+import java.io.PrintStream;
+
+public class TestFieldCache extends LuceneTestCase {
+  protected IndexReader reader;
+  private int NUM_DOCS;
+  private Directory directory;
+
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    NUM_DOCS = atLeast(1000);
+    directory = newDirectory();
+    RandomIndexWriter writer= new RandomIndexWriter(random, directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
+    long theLong = Long.MAX_VALUE;
+    double theDouble = Double.MAX_VALUE;
+    byte theByte = Byte.MAX_VALUE;
+    short theShort = Short.MAX_VALUE;
+    int theInt = Integer.MAX_VALUE;
+    float theFloat = Float.MAX_VALUE;
+    for (int i = 0; i < NUM_DOCS; i++){
+      Document doc = new Document();
+      doc.add(newField("theLong", String.valueOf(theLong--), Field.Store.NO, Field.Index.NOT_ANALYZED));
+      doc.add(newField("theDouble", String.valueOf(theDouble--), Field.Store.NO, Field.Index.NOT_ANALYZED));
+      doc.add(newField("theByte", String.valueOf(theByte--), Field.Store.NO, Field.Index.NOT_ANALYZED));
+      doc.add(newField("theShort", String.valueOf(theShort--), Field.Store.NO, Field.Index.NOT_ANALYZED));
+      doc.add(newField("theInt", String.valueOf(theInt--), Field.Store.NO, Field.Index.NOT_ANALYZED));
+      doc.add(newField("theFloat", String.valueOf(theFloat--), Field.Store.NO, Field.Index.NOT_ANALYZED));
+      writer.addDocument(doc);
+    }
+    writer.close();
+    reader = IndexReader.open(directory, true);
+  }
+
+  @Override
+  public void tearDown() throws Exception {
+    reader.close();
+    directory.close();
+    super.tearDown();
+  }
+  
+  public void testInfoStream() throws Exception {
+    try {
+      FieldCache cache = FieldCache.DEFAULT;
+      ByteArrayOutputStream bos = new ByteArrayOutputStream(1024);
+      cache.setInfoStream(new PrintStream(bos));
+      cache.getDoubles(reader, "theDouble");
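+      // requesting the same field as a different numeric type should log a WARNING to the info stream (asserted below)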
+      cache.getFloats(reader, "theDouble");
+      assertTrue(bos.toString().indexOf("WARNING") != -1);
+    } finally {
+      FieldCache.DEFAULT.purgeAllCaches();
+    }
+  }
+
+  public void test() throws IOException {
+    FieldCache cache = FieldCache.DEFAULT;
+    double [] doubles = cache.getDoubles(reader, "theDouble");
+    assertSame("Second request to cache return same array", doubles, cache.getDoubles(reader, "theDouble"));
+    assertSame("Second request with explicit parser return same array", doubles, cache.getDoubles(reader, "theDouble", FieldCache.DEFAULT_DOUBLE_PARSER));
+    assertTrue("doubles Size: " + doubles.length + " is not: " + NUM_DOCS, doubles.length == NUM_DOCS);
+    for (int i = 0; i < doubles.length; i++) {
+      assertTrue(doubles[i] + " does not equal: " + (Double.MAX_VALUE - i), doubles[i] == (Double.MAX_VALUE - i));
+
+    }
+    
+    long [] longs = cache.getLongs(reader, "theLong");
+    assertSame("Second request to cache return same array", longs, cache.getLongs(reader, "theLong"));
+    assertSame("Second request with explicit parser return same array", longs, cache.getLongs(reader, "theLong", FieldCache.DEFAULT_LONG_PARSER));
+    assertTrue("longs Size: " + longs.length + " is not: " + NUM_DOCS, longs.length == NUM_DOCS);
+    for (int i = 0; i < longs.length; i++) {
+      assertTrue(longs[i] + " does not equal: " + (Long.MAX_VALUE - i), longs[i] == (Long.MAX_VALUE - i));
+
+    }
+    
+    byte [] bytes = cache.getBytes(reader, "theByte");
+    assertSame("Second request to cache return same array", bytes, cache.getBytes(reader, "theByte"));
+    assertSame("Second request with explicit parser return same array", bytes, cache.getBytes(reader, "theByte", FieldCache.DEFAULT_BYTE_PARSER));
+    assertTrue("bytes Size: " + bytes.length + " is not: " + NUM_DOCS, bytes.length == NUM_DOCS);
+    for (int i = 0; i < bytes.length; i++) {
+      assertTrue(bytes[i] + " does not equal: " + (Byte.MAX_VALUE - i), bytes[i] == (byte) (Byte.MAX_VALUE - i));
+
+    }
+    
+    short [] shorts = cache.getShorts(reader, "theShort");
+    assertSame("Second request to cache return same array", shorts, cache.getShorts(reader, "theShort"));
+    assertSame("Second request with explicit parser return same array", shorts, cache.getShorts(reader, "theShort", FieldCache.DEFAULT_SHORT_PARSER));
+    assertTrue("shorts Size: " + shorts.length + " is not: " + NUM_DOCS, shorts.length == NUM_DOCS);
+    for (int i = 0; i < shorts.length; i++) {
+      assertTrue(shorts[i] + " does not equal: " + (Short.MAX_VALUE - i), shorts[i] == (short) (Short.MAX_VALUE - i));
+
+    }
+    
+    int [] ints = cache.getInts(reader, "theInt");
+    assertSame("Second request to cache return same array", ints, cache.getInts(reader, "theInt"));
+    assertSame("Second request with explicit parser return same array", ints, cache.getInts(reader, "theInt", FieldCache.DEFAULT_INT_PARSER));
+    assertTrue("ints Size: " + ints.length + " is not: " + NUM_DOCS, ints.length == NUM_DOCS);
+    for (int i = 0; i < ints.length; i++) {
+      assertTrue(ints[i] + " does not equal: " + (Integer.MAX_VALUE - i), ints[i] == (Integer.MAX_VALUE - i));
+
+    }
+    
+    float [] floats = cache.getFloats(reader, "theFloat");
+    assertSame("Second request to cache return same array", floats, cache.getFloats(reader, "theFloat"));
+    assertSame("Second request with explicit parser return same array", floats, cache.getFloats(reader, "theFloat", FieldCache.DEFAULT_FLOAT_PARSER));
+    assertTrue("floats Size: " + floats.length + " is not: " + NUM_DOCS, floats.length == NUM_DOCS);
+    for (int i = 0; i < floats.length; i++) {
+      assertTrue(floats[i] + " does not equal: " + (Float.MAX_VALUE - i), floats[i] == (Float.MAX_VALUE - i));
+
+    }
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/TestFieldCacheRangeFilter.java b/lucene/backwards/src/test/org/apache/lucene/search/TestFieldCacheRangeFilter.java
new file mode 100644
index 0000000..f192d83
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/TestFieldCacheRangeFilter.java
@@ -0,0 +1,587 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.store.Directory;
+import org.junit.Test;
+
+/**
+ * A basic 'positive' unit test class for the FieldCacheRangeFilter class.
+ *
+ * <p>
+ * NOTE: at the moment, this class only tests for 'positive' results;
+ * it does not verify the results to ensure there are no 'false positives',
+ * nor does it adequately test 'negative' results.  It also does not test
+ * that garbage input results in an Exception.
+ */
+public class TestFieldCacheRangeFilter extends BaseTestRangeFilter {
+
+  @Test
+  public void testRangeFilterId() throws IOException {
+
+    IndexReader reader = signedIndexReader;
+    IndexSearcher search = newSearcher(reader);
+
+    int medId = ((maxId - minId) / 2);
+        
+    String minIP = pad(minId);
+    String maxIP = pad(maxId);
+    String medIP = pad(medId);
+    
+    int numDocs = reader.numDocs();
+        
+    assertEquals("num of docs", numDocs, 1+ maxId - minId);
+        
+    ScoreDoc[] result;
+    Query q = new TermQuery(new Term("body","body"));
+
+    // test id, bounded on both ends
+    FieldCacheRangeFilter<String> fcrf;
+    result = search.search(q,fcrf = FieldCacheRangeFilter.newStringRange("id",minIP,maxIP,T,T), numDocs).scoreDocs;
+    assertTrue(fcrf.getDocIdSet(reader.getSequentialSubReaders()[0]).isCacheable());
+    assertEquals("find all", numDocs, result.length);
+
+    result = search.search(q,FieldCacheRangeFilter.newStringRange("id",minIP,maxIP,T,F), numDocs).scoreDocs;
+    assertEquals("all but last", numDocs-1, result.length);
+
+    result = search.search(q,FieldCacheRangeFilter.newStringRange("id",minIP,maxIP,F,T), numDocs).scoreDocs;
+    assertEquals("all but first", numDocs-1, result.length);
+        
+    result = search.search(q,FieldCacheRangeFilter.newStringRange("id",minIP,maxIP,F,F), numDocs).scoreDocs;
+    assertEquals("all but ends", numDocs-2, result.length);
+    
+    result = search.search(q,FieldCacheRangeFilter.newStringRange("id",medIP,maxIP,T,T), numDocs).scoreDocs;
+    assertEquals("med and up", 1+ maxId-medId, result.length);
+        
+    result = search.search(q,FieldCacheRangeFilter.newStringRange("id",minIP,medIP,T,T), numDocs).scoreDocs;
+    assertEquals("up to med", 1+ medId-minId, result.length);
+
+    // unbounded id
+
+    result = search.search(q,FieldCacheRangeFilter.newStringRange("id",null,null,T,T), numDocs).scoreDocs;
+    assertEquals("find all", numDocs, result.length);
+
+    result = search.search(q,FieldCacheRangeFilter.newStringRange("id",minIP,null,T,F), numDocs).scoreDocs;
+    assertEquals("min and up", numDocs, result.length);
+
+    result = search.search(q,FieldCacheRangeFilter.newStringRange("id",null,maxIP,F,T), numDocs).scoreDocs;
+    assertEquals("max and down", numDocs, result.length);
+
+    result = search.search(q,FieldCacheRangeFilter.newStringRange("id",minIP,null,F,F), numDocs).scoreDocs;
+    assertEquals("not min, but up", numDocs-1, result.length);
+        
+    result = search.search(q,FieldCacheRangeFilter.newStringRange("id",null,maxIP,F,F), numDocs).scoreDocs;
+    assertEquals("not max, but down", numDocs-1, result.length);
+        
+    result = search.search(q,FieldCacheRangeFilter.newStringRange("id",medIP,maxIP,T,F), numDocs).scoreDocs;
+    assertEquals("med and up, not max", maxId-medId, result.length);
+        
+    result = search.search(q,FieldCacheRangeFilter.newStringRange("id",minIP,medIP,F,T), numDocs).scoreDocs;
+    assertEquals("not min, up to med", medId-minId, result.length);
+
+    // very small sets
+
+    result = search.search(q,FieldCacheRangeFilter.newStringRange("id",minIP,minIP,F,F), numDocs).scoreDocs;
+    assertEquals("min,min,F,F", 0, result.length);
+    result = search.search(q,FieldCacheRangeFilter.newStringRange("id",medIP,medIP,F,F), numDocs).scoreDocs;
+    assertEquals("med,med,F,F", 0, result.length);
+    result = search.search(q,FieldCacheRangeFilter.newStringRange("id",maxIP,maxIP,F,F), numDocs).scoreDocs;
+    assertEquals("max,max,F,F", 0, result.length);
+                     
+    result = search.search(q,FieldCacheRangeFilter.newStringRange("id",minIP,minIP,T,T), numDocs).scoreDocs;
+    assertEquals("min,min,T,T", 1, result.length);
+    result = search.search(q,FieldCacheRangeFilter.newStringRange("id",null,minIP,F,T), numDocs).scoreDocs;
+    assertEquals("nul,min,F,T", 1, result.length);
+
+    result = search.search(q,FieldCacheRangeFilter.newStringRange("id",maxIP,maxIP,T,T), numDocs).scoreDocs;
+    assertEquals("max,max,T,T", 1, result.length);
+    result = search.search(q,FieldCacheRangeFilter.newStringRange("id",maxIP,null,T,F), numDocs).scoreDocs;
+    assertEquals("max,nul,T,T", 1, result.length);
+
+    result = search.search(q,FieldCacheRangeFilter.newStringRange("id",medIP,medIP,T,T), numDocs).scoreDocs;
+    assertEquals("med,med,T,T", 1, result.length);
+    search.close();
+  }
+
+  @Test
+  public void testFieldCacheRangeFilterRand() throws IOException {
+
+    IndexReader reader = signedIndexReader;
+    IndexSearcher search = newSearcher(reader);
+
+    String minRP = pad(signedIndexDir.minR);
+    String maxRP = pad(signedIndexDir.maxR);
+    
+    int numDocs = reader.numDocs();
+        
+    assertEquals("num of docs", numDocs, 1+ maxId - minId);
+        
+    ScoreDoc[] result;
+    Query q = new TermQuery(new Term("body","body"));
+
+    // test extremes, bounded on both ends
+        
+    result = search.search(q,FieldCacheRangeFilter.newStringRange("rand",minRP,maxRP,T,T), numDocs).scoreDocs;
+    assertEquals("find all", numDocs, result.length);
+
+    result = search.search(q,FieldCacheRangeFilter.newStringRange("rand",minRP,maxRP,T,F), numDocs).scoreDocs;
+    assertEquals("all but biggest", numDocs-1, result.length);
+
+    result = search.search(q,FieldCacheRangeFilter.newStringRange("rand",minRP,maxRP,F,T), numDocs).scoreDocs;
+    assertEquals("all but smallest", numDocs-1, result.length);
+        
+    result = search.search(q,FieldCacheRangeFilter.newStringRange("rand",minRP,maxRP,F,F), numDocs).scoreDocs;
+    assertEquals("all but extremes", numDocs-2, result.length);
+    
+    // unbounded
+
+    result = search.search(q,FieldCacheRangeFilter.newStringRange("rand",minRP,null,T,F), numDocs).scoreDocs;
+    assertEquals("smallest and up", numDocs, result.length);
+
+    result = search.search(q,FieldCacheRangeFilter.newStringRange("rand",null,maxRP,F,T), numDocs).scoreDocs;
+    assertEquals("biggest and down", numDocs, result.length);
+
+    result = search.search(q,FieldCacheRangeFilter.newStringRange("rand",minRP,null,F,F), numDocs).scoreDocs;
+    assertEquals("not smallest, but up", numDocs-1, result.length);
+        
+    result = search.search(q,FieldCacheRangeFilter.newStringRange("rand",null,maxRP,F,F), numDocs).scoreDocs;
+    assertEquals("not biggest, but down", numDocs-1, result.length);
+        
+    // very small sets
+
+    result = search.search(q,FieldCacheRangeFilter.newStringRange("rand",minRP,minRP,F,F), numDocs).scoreDocs;
+    assertEquals("min,min,F,F", 0, result.length);
+    result = search.search(q,FieldCacheRangeFilter.newStringRange("rand",maxRP,maxRP,F,F), numDocs).scoreDocs;
+    assertEquals("max,max,F,F", 0, result.length);
+                     
+    result = search.search(q,FieldCacheRangeFilter.newStringRange("rand",minRP,minRP,T,T), numDocs).scoreDocs;
+    assertEquals("min,min,T,T", 1, result.length);
+    result = search.search(q,FieldCacheRangeFilter.newStringRange("rand",null,minRP,F,T), numDocs).scoreDocs;
+    assertEquals("nul,min,F,T", 1, result.length);
+
+    result = search.search(q,FieldCacheRangeFilter.newStringRange("rand",maxRP,maxRP,T,T), numDocs).scoreDocs;
+    assertEquals("max,max,T,T", 1, result.length);
+    result = search.search(q,FieldCacheRangeFilter.newStringRange("rand",maxRP,null,T,F), numDocs).scoreDocs;
+    assertEquals("max,nul,T,T", 1, result.length);
+    search.close();
+  }
+  
+  // byte ranges cannot be tested here, because all ranges are too big for bytes; an extra range would be needed for that
+
+  @Test
+  public void testFieldCacheRangeFilterShorts() throws IOException {
+
+    IndexReader reader = signedIndexReader;
+    IndexSearcher search = newSearcher(reader);
+
+    int numDocs = reader.numDocs();
+    int medId = ((maxId - minId) / 2);
+    Short minIdO = Short.valueOf((short) minId);
+    Short maxIdO = Short.valueOf((short) maxId);
+    Short medIdO = Short.valueOf((short) medId);
+        
+    assertEquals("num of docs", numDocs, 1+ maxId - minId);
+        
+    ScoreDoc[] result;
+    Query q = new TermQuery(new Term("body","body"));
+
+    // test id, bounded on both ends
+    FieldCacheRangeFilter<Short> fcrf;
+    result = search.search(q,fcrf=FieldCacheRangeFilter.newShortRange("id",minIdO,maxIdO,T,T), numDocs).scoreDocs;
+    assertTrue(fcrf.getDocIdSet(reader.getSequentialSubReaders()[0]).isCacheable());
+    assertEquals("find all", numDocs, result.length);
+
+    result = search.search(q,FieldCacheRangeFilter.newShortRange("id",minIdO,maxIdO,T,F), numDocs).scoreDocs;
+    assertEquals("all but last", numDocs-1, result.length);
+
+    result = search.search(q,FieldCacheRangeFilter.newShortRange("id",minIdO,maxIdO,F,T), numDocs).scoreDocs;
+    assertEquals("all but first", numDocs-1, result.length);
+        
+    result = search.search(q,FieldCacheRangeFilter.newShortRange("id",minIdO,maxIdO,F,F), numDocs).scoreDocs;
+    assertEquals("all but ends", numDocs-2, result.length);
+    
+    result = search.search(q,FieldCacheRangeFilter.newShortRange("id",medIdO,maxIdO,T,T), numDocs).scoreDocs;
+    assertEquals("med and up", 1+ maxId-medId, result.length);
+        
+    result = search.search(q,FieldCacheRangeFilter.newShortRange("id",minIdO,medIdO,T,T), numDocs).scoreDocs;
+    assertEquals("up to med", 1+ medId-minId, result.length);
+    
+    // unbounded id
+
+    result = search.search(q,FieldCacheRangeFilter.newShortRange("id",null,null,T,T), numDocs).scoreDocs;
+    assertEquals("find all", numDocs, result.length);
+
+    result = search.search(q,FieldCacheRangeFilter.newShortRange("id",minIdO,null,T,F), numDocs).scoreDocs;
+    assertEquals("min and up", numDocs, result.length);
+
+    result = search.search(q,FieldCacheRangeFilter.newShortRange("id",null,maxIdO,F,T), numDocs).scoreDocs;
+    assertEquals("max and down", numDocs, result.length);
+
+    result = search.search(q,FieldCacheRangeFilter.newShortRange("id",minIdO,null,F,F), numDocs).scoreDocs;
+    assertEquals("not min, but up", numDocs-1, result.length);
+        
+    result = search.search(q,FieldCacheRangeFilter.newShortRange("id",null,maxIdO,F,F), numDocs).scoreDocs;
+    assertEquals("not max, but down", numDocs-1, result.length);
+        
+    result = search.search(q,FieldCacheRangeFilter.newShortRange("id",medIdO,maxIdO,T,F), numDocs).scoreDocs;
+    assertEquals("med and up, not max", maxId-medId, result.length);
+        
+    result = search.search(q,FieldCacheRangeFilter.newShortRange("id",minIdO,medIdO,F,T), numDocs).scoreDocs;
+    assertEquals("not min, up to med", medId-minId, result.length);
+
+    // very small sets
+
+    result = search.search(q,FieldCacheRangeFilter.newShortRange("id",minIdO,minIdO,F,F), numDocs).scoreDocs;
+    assertEquals("min,min,F,F", 0, result.length);
+    result = search.search(q,FieldCacheRangeFilter.newShortRange("id",medIdO,medIdO,F,F), numDocs).scoreDocs;
+    assertEquals("med,med,F,F", 0, result.length);
+    result = search.search(q,FieldCacheRangeFilter.newShortRange("id",maxIdO,maxIdO,F,F), numDocs).scoreDocs;
+    assertEquals("max,max,F,F", 0, result.length);
+                     
+    result = search.search(q,FieldCacheRangeFilter.newShortRange("id",minIdO,minIdO,T,T), numDocs).scoreDocs;
+    assertEquals("min,min,T,T", 1, result.length);
+    result = search.search(q,FieldCacheRangeFilter.newShortRange("id",null,minIdO,F,T), numDocs).scoreDocs;
+    assertEquals("nul,min,F,T", 1, result.length);
+
+    result = search.search(q,FieldCacheRangeFilter.newShortRange("id",maxIdO,maxIdO,T,T), numDocs).scoreDocs;
+    assertEquals("max,max,T,T", 1, result.length);
+    result = search.search(q,FieldCacheRangeFilter.newShortRange("id",maxIdO,null,T,F), numDocs).scoreDocs;
+    assertEquals("max,nul,T,T", 1, result.length);
+
+    result = search.search(q,FieldCacheRangeFilter.newShortRange("id",medIdO,medIdO,T,T), numDocs).scoreDocs;
+    assertEquals("med,med,T,T", 1, result.length);
+    
+    // special cases
+    result = search.search(q,FieldCacheRangeFilter.newShortRange("id",Short.valueOf(Short.MAX_VALUE),null,F,F), numDocs).scoreDocs;
+    assertEquals("overflow special case", 0, result.length);
+    result = search.search(q,FieldCacheRangeFilter.newShortRange("id",null,Short.valueOf(Short.MIN_VALUE),F,F), numDocs).scoreDocs;
+    assertEquals("overflow special case", 0, result.length);
+    result = search.search(q,FieldCacheRangeFilter.newShortRange("id",maxIdO,minIdO,T,T), numDocs).scoreDocs;
+    assertEquals("inverse range", 0, result.length);
+    search.close();
+  }
+  
+  @Test
+  public void testFieldCacheRangeFilterInts() throws IOException {
+
+    IndexReader reader = signedIndexReader;
+    IndexSearcher search = newSearcher(reader);
+
+    int numDocs = reader.numDocs();
+    int medId = ((maxId - minId) / 2);
+    Integer minIdO = Integer.valueOf(minId);
+    Integer maxIdO = Integer.valueOf(maxId);
+    Integer medIdO = Integer.valueOf(medId);
+        
+    assertEquals("num of docs", numDocs, 1+ maxId - minId);
+        
+    ScoreDoc[] result;
+    Query q = new TermQuery(new Term("body","body"));
+
+    // test id, bounded on both ends
+        
+    FieldCacheRangeFilter<Integer> fcrf;
+    result = search.search(q,fcrf=FieldCacheRangeFilter.newIntRange("id",minIdO,maxIdO,T,T), numDocs).scoreDocs;
+    assertTrue(fcrf.getDocIdSet(reader.getSequentialSubReaders()[0]).isCacheable());
+    assertEquals("find all", numDocs, result.length);
+
+    result = search.search(q,FieldCacheRangeFilter.newIntRange("id",minIdO,maxIdO,T,F), numDocs).scoreDocs;
+    assertEquals("all but last", numDocs-1, result.length);
+
+    result = search.search(q,FieldCacheRangeFilter.newIntRange("id",minIdO,maxIdO,F,T), numDocs).scoreDocs;
+    assertEquals("all but first", numDocs-1, result.length);
+        
+    result = search.search(q,FieldCacheRangeFilter.newIntRange("id",minIdO,maxIdO,F,F), numDocs).scoreDocs;
+    assertEquals("all but ends", numDocs-2, result.length);
+    
+    result = search.search(q,FieldCacheRangeFilter.newIntRange("id",medIdO,maxIdO,T,T), numDocs).scoreDocs;
+    assertEquals("med and up", 1+ maxId-medId, result.length);
+        
+    result = search.search(q,FieldCacheRangeFilter.newIntRange("id",minIdO,medIdO,T,T), numDocs).scoreDocs;
+    assertEquals("up to med", 1+ medId-minId, result.length);
+    
+    // unbounded id
+
+    result = search.search(q,FieldCacheRangeFilter.newIntRange("id",null,null,T,T), numDocs).scoreDocs;
+    assertEquals("find all", numDocs, result.length);
+
+    result = search.search(q,FieldCacheRangeFilter.newIntRange("id",minIdO,null,T,F), numDocs).scoreDocs;
+    assertEquals("min and up", numDocs, result.length);
+
+    result = search.search(q,FieldCacheRangeFilter.newIntRange("id",null,maxIdO,F,T), numDocs).scoreDocs;
+    assertEquals("max and down", numDocs, result.length);
+
+    result = search.search(q,FieldCacheRangeFilter.newIntRange("id",minIdO,null,F,F), numDocs).scoreDocs;
+    assertEquals("not min, but up", numDocs-1, result.length);
+        
+    result = search.search(q,FieldCacheRangeFilter.newIntRange("id",null,maxIdO,F,F), numDocs).scoreDocs;
+    assertEquals("not max, but down", numDocs-1, result.length);
+        
+    result = search.search(q,FieldCacheRangeFilter.newIntRange("id",medIdO,maxIdO,T,F), numDocs).scoreDocs;
+    assertEquals("med and up, not max", maxId-medId, result.length);
+        
+    result = search.search(q,FieldCacheRangeFilter.newIntRange("id",minIdO,medIdO,F,T), numDocs).scoreDocs;
+    assertEquals("not min, up to med", medId-minId, result.length);
+
+    // very small sets
+
+    result = search.search(q,FieldCacheRangeFilter.newIntRange("id",minIdO,minIdO,F,F), numDocs).scoreDocs;
+    assertEquals("min,min,F,F", 0, result.length);
+    result = search.search(q,FieldCacheRangeFilter.newIntRange("id",medIdO,medIdO,F,F), numDocs).scoreDocs;
+    assertEquals("med,med,F,F", 0, result.length);
+    result = search.search(q,FieldCacheRangeFilter.newIntRange("id",maxIdO,maxIdO,F,F), numDocs).scoreDocs;
+    assertEquals("max,max,F,F", 0, result.length);
+                     
+    result = search.search(q,FieldCacheRangeFilter.newIntRange("id",minIdO,minIdO,T,T), numDocs).scoreDocs;
+    assertEquals("min,min,T,T", 1, result.length);
+    result = search.search(q,FieldCacheRangeFilter.newIntRange("id",null,minIdO,F,T), numDocs).scoreDocs;
+    assertEquals("nul,min,F,T", 1, result.length);
+
+    result = search.search(q,FieldCacheRangeFilter.newIntRange("id",maxIdO,maxIdO,T,T), numDocs).scoreDocs;
+    assertEquals("max,max,T,T", 1, result.length);
+    result = search.search(q,FieldCacheRangeFilter.newIntRange("id",maxIdO,null,T,F), numDocs).scoreDocs;
+    assertEquals("max,nul,T,T", 1, result.length);
+
+    result = search.search(q,FieldCacheRangeFilter.newIntRange("id",medIdO,medIdO,T,T), numDocs).scoreDocs;
+    assertEquals("med,med,T,T", 1, result.length);
+    
+    // special cases
+    result = search.search(q,FieldCacheRangeFilter.newIntRange("id",Integer.valueOf(Integer.MAX_VALUE),null,F,F), numDocs).scoreDocs;
+    assertEquals("overflow special case", 0, result.length);
+    result = search.search(q,FieldCacheRangeFilter.newIntRange("id",null,Integer.valueOf(Integer.MIN_VALUE),F,F), numDocs).scoreDocs;
+    assertEquals("overflow special case", 0, result.length);
+    result = search.search(q,FieldCacheRangeFilter.newIntRange("id",maxIdO,minIdO,T,T), numDocs).scoreDocs;
+    assertEquals("inverse range", 0, result.length);
+    search.close();
+  }
+  
+  @Test
+  public void testFieldCacheRangeFilterLongs() throws IOException {
+
+    IndexReader reader = signedIndexReader;
+    IndexSearcher search = newSearcher(reader);
+
+    int numDocs = reader.numDocs();
+    int medId = ((maxId - minId) / 2);
+    Long minIdO = Long.valueOf(minId);
+    Long maxIdO = Long.valueOf(maxId);
+    Long medIdO = Long.valueOf(medId);
+        
+    assertEquals("num of docs", numDocs, 1+ maxId - minId);
+        
+    ScoreDoc[] result;
+    Query q = new TermQuery(new Term("body","body"));
+
+    // test id, bounded on both ends
+        
+    FieldCacheRangeFilter<Long> fcrf;
+    result = search.search(q,fcrf=FieldCacheRangeFilter.newLongRange("id",minIdO,maxIdO,T,T), numDocs).scoreDocs;
+    assertTrue(fcrf.getDocIdSet(reader.getSequentialSubReaders()[0]).isCacheable());
+    assertEquals("find all", numDocs, result.length);
+
+    result = search.search(q,FieldCacheRangeFilter.newLongRange("id",minIdO,maxIdO,T,F), numDocs).scoreDocs;
+    assertEquals("all but last", numDocs-1, result.length);
+
+    result = search.search(q,FieldCacheRangeFilter.newLongRange("id",minIdO,maxIdO,F,T), numDocs).scoreDocs;
+    assertEquals("all but first", numDocs-1, result.length);
+        
+    result = search.search(q,FieldCacheRangeFilter.newLongRange("id",minIdO,maxIdO,F,F), numDocs).scoreDocs;
+    assertEquals("all but ends", numDocs-2, result.length);
+    
+    result = search.search(q,FieldCacheRangeFilter.newLongRange("id",medIdO,maxIdO,T,T), numDocs).scoreDocs;
+    assertEquals("med and up", 1+ maxId-medId, result.length);
+        
+    result = search.search(q,FieldCacheRangeFilter.newLongRange("id",minIdO,medIdO,T,T), numDocs).scoreDocs;
+    assertEquals("up to med", 1+ medId-minId, result.length);
+    
+    // unbounded id
+
+    result = search.search(q,FieldCacheRangeFilter.newLongRange("id",null,null,T,T), numDocs).scoreDocs;
+    assertEquals("find all", numDocs, result.length);
+
+    result = search.search(q,FieldCacheRangeFilter.newLongRange("id",minIdO,null,T,F), numDocs).scoreDocs;
+    assertEquals("min and up", numDocs, result.length);
+
+    result = search.search(q,FieldCacheRangeFilter.newLongRange("id",null,maxIdO,F,T), numDocs).scoreDocs;
+    assertEquals("max and down", numDocs, result.length);
+
+    result = search.search(q,FieldCacheRangeFilter.newLongRange("id",minIdO,null,F,F), numDocs).scoreDocs;
+    assertEquals("not min, but up", numDocs-1, result.length);
+        
+    result = search.search(q,FieldCacheRangeFilter.newLongRange("id",null,maxIdO,F,F), numDocs).scoreDocs;
+    assertEquals("not max, but down", numDocs-1, result.length);
+        
+    result = search.search(q,FieldCacheRangeFilter.newLongRange("id",medIdO,maxIdO,T,F), numDocs).scoreDocs;
+    assertEquals("med and up, not max", maxId-medId, result.length);
+        
+    result = search.search(q,FieldCacheRangeFilter.newLongRange("id",minIdO,medIdO,F,T), numDocs).scoreDocs;
+    assertEquals("not min, up to med", medId-minId, result.length);
+
+    // very small sets
+
+    result = search.search(q,FieldCacheRangeFilter.newLongRange("id",minIdO,minIdO,F,F), numDocs).scoreDocs;
+    assertEquals("min,min,F,F", 0, result.length);
+    result = search.search(q,FieldCacheRangeFilter.newLongRange("id",medIdO,medIdO,F,F), numDocs).scoreDocs;
+    assertEquals("med,med,F,F", 0, result.length);
+    result = search.search(q,FieldCacheRangeFilter.newLongRange("id",maxIdO,maxIdO,F,F), numDocs).scoreDocs;
+    assertEquals("max,max,F,F", 0, result.length);
+                     
+    result = search.search(q,FieldCacheRangeFilter.newLongRange("id",minIdO,minIdO,T,T), numDocs).scoreDocs;
+    assertEquals("min,min,T,T", 1, result.length);
+    result = search.search(q,FieldCacheRangeFilter.newLongRange("id",null,minIdO,F,T), numDocs).scoreDocs;
+    assertEquals("nul,min,F,T", 1, result.length);
+
+    result = search.search(q,FieldCacheRangeFilter.newLongRange("id",maxIdO,maxIdO,T,T), numDocs).scoreDocs;
+    assertEquals("max,max,T,T", 1, result.length);
+    result = search.search(q,FieldCacheRangeFilter.newLongRange("id",maxIdO,null,T,F), numDocs).scoreDocs;
+    assertEquals("max,nul,T,T", 1, result.length);
+
+    result = search.search(q,FieldCacheRangeFilter.newLongRange("id",medIdO,medIdO,T,T), numDocs).scoreDocs;
+    assertEquals("med,med,T,T", 1, result.length);
+    
+    // special cases
+    result = search.search(q,FieldCacheRangeFilter.newLongRange("id",Long.valueOf(Long.MAX_VALUE),null,F,F), numDocs).scoreDocs;
+    assertEquals("overflow special case", 0, result.length);
+    result = search.search(q,FieldCacheRangeFilter.newLongRange("id",null,Long.valueOf(Long.MIN_VALUE),F,F), numDocs).scoreDocs;
+    assertEquals("overflow special case", 0, result.length);
+    result = search.search(q,FieldCacheRangeFilter.newLongRange("id",maxIdO,minIdO,T,T), numDocs).scoreDocs;
+    assertEquals("inverse range", 0, result.length);
+    search.close();
+  }
+  
+  // float and double tests are a bit minimalistic, but it's complicated because of missing precision
+  
+  @Test
+  public void testFieldCacheRangeFilterFloats() throws IOException {
+
+    IndexReader reader = signedIndexReader;
+    IndexSearcher search = newSearcher(reader);
+
+    int numDocs = reader.numDocs();
+    Float minIdO = Float.valueOf(minId + .5f);
+    Float medIdO = Float.valueOf(minIdO.floatValue() + ((maxId-minId))/2.0f);
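+    // medIdO lies halfway through the id range, so [minIdO, medIdO] should match exactly half the docs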
+        
+    ScoreDoc[] result;
+    Query q = new TermQuery(new Term("body","body"));
+
+    result = search.search(q,FieldCacheRangeFilter.newFloatRange("id",minIdO,medIdO,T,T), numDocs).scoreDocs;
+    assertEquals("find all", numDocs/2, result.length);
+    int count = 0;
+    result = search.search(q,FieldCacheRangeFilter.newFloatRange("id",null,medIdO,F,T), numDocs).scoreDocs;
+    count += result.length;
+    result = search.search(q,FieldCacheRangeFilter.newFloatRange("id",medIdO,null,F,F), numDocs).scoreDocs;
+    count += result.length;
+    assertEquals("sum of two concenatted ranges", numDocs, count);
+    result = search.search(q,FieldCacheRangeFilter.newFloatRange("id",null,null,T,T), numDocs).scoreDocs;
+    assertEquals("find all", numDocs, result.length);
+    result = search.search(q,FieldCacheRangeFilter.newFloatRange("id",Float.valueOf(Float.POSITIVE_INFINITY),null,F,F), numDocs).scoreDocs;
+    assertEquals("infinity special case", 0, result.length);
+    result = search.search(q,FieldCacheRangeFilter.newFloatRange("id",null,Float.valueOf(Float.NEGATIVE_INFINITY),F,F), numDocs).scoreDocs;
+    assertEquals("infinity special case", 0, result.length);
+    search.close();
+  }
+  
+  @Test
+  public void testFieldCacheRangeFilterDoubles() throws IOException {
+
+    IndexReader reader = signedIndexReader;
+    IndexSearcher search = newSearcher(reader);
+
+    int numDocs = reader.numDocs();
+    Double minIdO = Double.valueOf(minId + .5);
+    Double medIdO = Double.valueOf(minIdO.floatValue() + ((maxId-minId))/2.0);
+        
+    ScoreDoc[] result;
+    Query q = new TermQuery(new Term("body","body"));
+
+    result = search.search(q,FieldCacheRangeFilter.newDoubleRange("id",minIdO,medIdO,T,T), numDocs).scoreDocs;
+    assertEquals("find all", numDocs/2, result.length);
+    int count = 0;
+    result = search.search(q,FieldCacheRangeFilter.newDoubleRange("id",null,medIdO,F,T), numDocs).scoreDocs;
+    count += result.length;
+    result = search.search(q,FieldCacheRangeFilter.newDoubleRange("id",medIdO,null,F,F), numDocs).scoreDocs;
+    count += result.length;
+    assertEquals("sum of two concenatted ranges", numDocs, count);
+    result = search.search(q,FieldCacheRangeFilter.newDoubleRange("id",null,null,T,T), numDocs).scoreDocs;
+    assertEquals("find all", numDocs, result.length);
+    result = search.search(q,FieldCacheRangeFilter.newDoubleRange("id",Double.valueOf(Double.POSITIVE_INFINITY),null,F,F), numDocs).scoreDocs;
+    assertEquals("infinity special case", 0, result.length);
+    result = search.search(q,FieldCacheRangeFilter.newDoubleRange("id",null, Double.valueOf(Double.NEGATIVE_INFINITY),F,F), numDocs).scoreDocs;
+    assertEquals("infinity special case", 0, result.length);
+    search.close();
+  }
+  
+  // test using a sparse index (with deleted docs). The DocIdSet should not be cacheable, as it uses TermDocs if the range contains 0
+  @Test
+  public void testSparseIndex() throws IOException {
+    Directory dir = newDirectory();
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+
+    for (int d = -20; d <= 20; d++) {
+      Document doc = new Document();
+      doc.add(newField("id",Integer.toString(d), Field.Store.NO, Field.Index.NOT_ANALYZED));
+      doc.add(newField("body","body", Field.Store.NO, Field.Index.NOT_ANALYZED));
+      writer.addDocument(doc);
+    }
+    
+    writer.optimize();
+    writer.deleteDocuments(new Term("id","0"));
+    writer.close();
+
+    IndexReader reader = IndexReader.open(dir, true);
+    IndexSearcher search = newSearcher(reader);
+    assertTrue(reader.hasDeletions());
+
+    ScoreDoc[] result;
+    FieldCacheRangeFilter<Byte> fcrf;
+    Query q = new TermQuery(new Term("body","body"));
+
+    result = search.search(q,fcrf=FieldCacheRangeFilter.newByteRange("id",Byte.valueOf((byte) -20),Byte.valueOf((byte) 20),T,T), 100).scoreDocs;
+    assertFalse("DocIdSet must be not cacheable", fcrf.getDocIdSet(reader.getSequentialSubReaders()[0]).isCacheable());
+    assertEquals("find all", 40, result.length);
+
+    result = search.search(q,fcrf=FieldCacheRangeFilter.newByteRange("id",Byte.valueOf((byte) 0),Byte.valueOf((byte) 20),T,T), 100).scoreDocs;
+    assertFalse("DocIdSet must be not cacheable", fcrf.getDocIdSet(reader.getSequentialSubReaders()[0]).isCacheable());
+    assertEquals("find all", 20, result.length);
+
+    result = search.search(q,fcrf=FieldCacheRangeFilter.newByteRange("id",Byte.valueOf((byte) -20),Byte.valueOf((byte) 0),T,T), 100).scoreDocs;
+    assertFalse("DocIdSet must be not cacheable", fcrf.getDocIdSet(reader.getSequentialSubReaders()[0]).isCacheable());
+    assertEquals("find all", 20, result.length);
+
+    result = search.search(q,fcrf=FieldCacheRangeFilter.newByteRange("id",Byte.valueOf((byte) 10),Byte.valueOf((byte) 20),T,T), 100).scoreDocs;
+    assertTrue("DocIdSet must be cacheable", fcrf.getDocIdSet(reader.getSequentialSubReaders()[0]).isCacheable());
+    assertEquals("find all", 11, result.length);
+
+    result = search.search(q,fcrf=FieldCacheRangeFilter.newByteRange("id",Byte.valueOf((byte) -20),Byte.valueOf((byte) -10),T,T), 100).scoreDocs;
+    assertTrue("DocIdSet must be cacheable", fcrf.getDocIdSet(reader.getSequentialSubReaders()[0]).isCacheable());
+    assertEquals("find all", 11, result.length);
+    search.close();
+    reader.close();
+    dir.close();
+  }
+  
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/TestFieldCacheTermsFilter.java b/lucene/backwards/src/test/org/apache/lucene/search/TestFieldCacheTermsFilter.java
new file mode 100644
index 0000000..f526f3e
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/TestFieldCacheTermsFilter.java
@@ -0,0 +1,75 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.util.LuceneTestCase;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.store.Directory;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * A basic unit test for FieldCacheTermsFilter
+ *
+ * @see org.apache.lucene.search.FieldCacheTermsFilter
+ */
+public class TestFieldCacheTermsFilter extends LuceneTestCase {
+  public void testMissingTerms() throws Exception {
+    String fieldName = "field1";
+    Directory rd = newDirectory();
+    RandomIndexWriter w = new RandomIndexWriter(random, rd);
+    for (int i = 0; i < 100; i++) {
+      Document doc = new Document();
+      int term = i * 10; // terms are multiples of 10
+      doc.add(newField(fieldName, "" + term, Field.Store.YES, Field.Index.NOT_ANALYZED));
+      w.addDocument(doc);
+    }
+    IndexReader reader = w.getReader();
+    w.close();
+
+    IndexSearcher searcher = newSearcher(reader);
+    int numDocs = reader.numDocs();
+    ScoreDoc[] results;
+    MatchAllDocsQuery q = new MatchAllDocsQuery();
+
+    List<String> terms = new ArrayList<String>();
+    terms.add("5");
+    results = searcher.search(q, new FieldCacheTermsFilter(fieldName,  terms.toArray(new String[0])), numDocs).scoreDocs;
+    assertEquals("Must match nothing", 0, results.length);
+
+    terms = new ArrayList<String>();
+    terms.add("10");
+    results = searcher.search(q, new FieldCacheTermsFilter(fieldName,  terms.toArray(new String[0])), numDocs).scoreDocs;
+    assertEquals("Must match 1", 1, results.length);
+
+    terms = new ArrayList<String>();
+    terms.add("10");
+    terms.add("20");
+    results = searcher.search(q, new FieldCacheTermsFilter(fieldName,  terms.toArray(new String[0])), numDocs).scoreDocs;
+    assertEquals("Must match 2", 2, results.length);
+
+    searcher.close();
+    reader.close();
+    rd.close();
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/TestFilteredQuery.java b/lucene/backwards/src/test/org/apache/lucene/search/TestFilteredQuery.java
new file mode 100644
index 0000000..cd6e210
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/TestFilteredQuery.java
@@ -0,0 +1,227 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.BooleanClause.Occur;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.DocIdBitSet;
+import org.apache.lucene.util.LuceneTestCase;
+
+import java.util.BitSet;
+
+/**
+ * FilteredQuery JUnit tests.
+ *
+ * <p>Created: Apr 21, 2004 1:21:46 PM
+ *
+ *
+ * @since   1.4
+ */
+public class TestFilteredQuery extends LuceneTestCase {
+
+  private IndexSearcher searcher;
+  private IndexReader reader;
+  private Directory directory;
+  private Query query;
+  private Filter filter;
+
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    directory = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter (random, directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
+
+    Document doc = new Document();
+    doc.add (newField("field", "one two three four five", Field.Store.YES, Field.Index.ANALYZED));
+    doc.add (newField("sorter", "b", Field.Store.YES, Field.Index.ANALYZED));
+    writer.addDocument (doc);
+
+    doc = new Document();
+    doc.add (newField("field", "one two three four", Field.Store.YES, Field.Index.ANALYZED));
+    doc.add (newField("sorter", "d", Field.Store.YES, Field.Index.ANALYZED));
+    writer.addDocument (doc);
+
+    doc = new Document();
+    doc.add (newField("field", "one two three y", Field.Store.YES, Field.Index.ANALYZED));
+    doc.add (newField("sorter", "a", Field.Store.YES, Field.Index.ANALYZED));
+    writer.addDocument (doc);
+
+    doc = new Document();
+    doc.add (newField("field", "one two x", Field.Store.YES, Field.Index.ANALYZED));
+    doc.add (newField("sorter", "c", Field.Store.YES, Field.Index.ANALYZED));
+    writer.addDocument (doc);
+
+    // these tests require a single segment (e.g. try seed
+    // 8239472272678419952L), because SingleDocTestFilter(x)
+    // blindly accepts that docID in any sub-segment
+    writer.optimize();
+
+    reader = writer.getReader();
+    writer.close ();
+
+    searcher = newSearcher(reader);
+    query = new TermQuery (new Term ("field", "three"));
+    filter = newStaticFilterB();
+  }
+
+  // must be static for serialization tests
+  private static Filter newStaticFilterB() {
+    return new Filter() {
+      @Override
+      public DocIdSet getDocIdSet (IndexReader reader) {
+        BitSet bitset = new BitSet(5);
+        bitset.set (1);
+        bitset.set (3);
+        return new DocIdBitSet(bitset);
+      }
+    };
+  }
+
+  @Override
+  public void tearDown() throws Exception {
+    searcher.close();
+    reader.close();
+    directory.close();
+    super.tearDown();
+  }
+
+  public void testFilteredQuery()
+  throws Exception {
+    Query filteredquery = new FilteredQuery (query, filter);
+    ScoreDoc[] hits = searcher.search (filteredquery, null, 1000).scoreDocs;
+    assertEquals (1, hits.length);
+    assertEquals (1, hits[0].doc);
+    QueryUtils.check(random, filteredquery,searcher);
+
+    hits = searcher.search (filteredquery, null, 1000, new Sort(new SortField("sorter", SortField.STRING))).scoreDocs;
+    assertEquals (1, hits.length);
+    assertEquals (1, hits[0].doc);
+
+    filteredquery = new FilteredQuery (new TermQuery (new Term ("field", "one")), filter);
+    hits = searcher.search (filteredquery, null, 1000).scoreDocs;
+    assertEquals (2, hits.length);
+    QueryUtils.check(random, filteredquery,searcher);
+
+    filteredquery = new FilteredQuery (new TermQuery (new Term ("field", "x")), filter);
+    hits = searcher.search (filteredquery, null, 1000).scoreDocs;
+    assertEquals (1, hits.length);
+    assertEquals (3, hits[0].doc);
+    QueryUtils.check(random, filteredquery,searcher);
+
+    filteredquery = new FilteredQuery (new TermQuery (new Term ("field", "y")), filter);
+    hits = searcher.search (filteredquery, null, 1000).scoreDocs;
+    assertEquals (0, hits.length);
+    QueryUtils.check(random, filteredquery,searcher);
+    
+    // test boost
+    Filter f = newStaticFilterA();
+    
+    float boost = 2.5f;
+    BooleanQuery bq1 = new BooleanQuery();
+    TermQuery tq = new TermQuery (new Term ("field", "one"));
+    tq.setBoost(boost);
+    bq1.add(tq, Occur.MUST);
+    bq1.add(new TermQuery (new Term ("field", "five")), Occur.MUST);
+    
+    BooleanQuery bq2 = new BooleanQuery();
+    tq = new TermQuery (new Term ("field", "one"));
+    filteredquery = new FilteredQuery(tq, f);
+    filteredquery.setBoost(boost);
+    bq2.add(filteredquery, Occur.MUST);
+    bq2.add(new TermQuery (new Term ("field", "five")), Occur.MUST);
+    assertScoreEquals(bq1, bq2);
+    
+    assertEquals(boost, filteredquery.getBoost(), 0);
+    assertEquals(1.0f, tq.getBoost(), 0); // the boost value of the underlying query shouldn't have changed 
+  }
+
+  // must be static for serialization tests 
+  private static Filter newStaticFilterA() {
+    return new Filter() {
+      @Override
+      public DocIdSet getDocIdSet (IndexReader reader) {
+        BitSet bitset = new BitSet(5);
+        bitset.set(0, 5);
+        return new DocIdBitSet(bitset);
+      }
+    };
+  }
+  
+  /**
+   * Tests whether the scores of the two queries are the same.
+   */
+  public void assertScoreEquals(Query q1, Query q2) throws Exception {
+    ScoreDoc[] hits1 = searcher.search (q1, null, 1000).scoreDocs;
+    ScoreDoc[] hits2 = searcher.search (q2, null, 1000).scoreDocs;
+      
+    assertEquals(hits1.length, hits2.length);
+    
+    for (int i = 0; i < hits1.length; i++) {
+      assertEquals(hits1[i].score, hits2[i].score, 0.0000001f);
+    }
+  }
+
+  /**
+   * This tests FilteredQuery's rewrite correctness
+   */
+  public void testRangeQuery() throws Exception {
+    TermRangeQuery rq = new TermRangeQuery(
+        "sorter", "b", "d", true, true);
+
+    Query filteredquery = new FilteredQuery(rq, filter);
+    ScoreDoc[] hits = searcher.search(filteredquery, null, 1000).scoreDocs;
+    assertEquals(2, hits.length);
+    QueryUtils.check(random, filteredquery,searcher);
+  }
+
+  public void testBoolean() throws Exception {
+    BooleanQuery bq = new BooleanQuery();
+    Query query = new FilteredQuery(new MatchAllDocsQuery(),
+        new SingleDocTestFilter(0));
+    bq.add(query, BooleanClause.Occur.MUST);
+    query = new FilteredQuery(new MatchAllDocsQuery(),
+        new SingleDocTestFilter(1));
+    bq.add(query, BooleanClause.Occur.MUST);
+    ScoreDoc[] hits = searcher.search(bq, null, 1000).scoreDocs;
+    assertEquals(0, hits.length);
+    QueryUtils.check(random, query,searcher);    
+  }
+
+  // Make sure BooleanQuery, which does out-of-order
+  // scoring, inside FilteredQuery, works
+  public void testBoolean2() throws Exception {
+    BooleanQuery bq = new BooleanQuery();
+    Query query = new FilteredQuery(bq,
+        new SingleDocTestFilter(0));
+    bq.add(new TermQuery(new Term("field", "one")), BooleanClause.Occur.SHOULD);
+    bq.add(new TermQuery(new Term("field", "two")), BooleanClause.Occur.SHOULD);
+    ScoreDoc[] hits = searcher.search(query, 1000).scoreDocs;
+    assertEquals(1, hits.length);
+    QueryUtils.check(random, query,searcher);    
+  }
+}
+
+
+
+
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/TestFilteredSearch.java b/lucene/backwards/src/test/org/apache/lucene/search/TestFilteredSearch.java
new file mode 100644
index 0000000..6af9e58
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/TestFilteredSearch.java
@@ -0,0 +1,130 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.lucene.search;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.CorruptIndexException;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.LockObtainFailedException;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.FixedBitSet;
+
+
+/**
+ * Tests filtered search with a Filter that maps its global doc IDs onto per-segment readers.
+ */
+public class TestFilteredSearch extends LuceneTestCase {
+
+  private static final String FIELD = "category";
+  
+  public void testFilteredSearch() throws CorruptIndexException, LockObtainFailedException, IOException {
+    boolean enforceSingleSegment = true;
+    Directory directory = newDirectory();
+    int[] filterBits = {1, 36};
+    SimpleDocIdSetFilter filter = new SimpleDocIdSetFilter(filterBits);
+    IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
+    searchFiltered(writer, directory, filter, enforceSingleSegment);
+    // run the test on more than one segment
+    enforceSingleSegment = false;
+    // reset - it is stateful
+    filter.reset();
+    writer.close();
+    writer = new IndexWriter(directory, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(10).setMergePolicy(newLogMergePolicy()));
+    // we index 60 docs - this will create 6 segments
+    searchFiltered(writer, directory, filter, enforceSingleSegment);
+    writer.close();
+    directory.close();
+  }
+
+  public void searchFiltered(IndexWriter writer, Directory directory, SimpleDocIdSetFilter filter, boolean optimize) {
+    try {
+      for (int i = 0; i < 60; i++) { // simple docs
+        Document doc = new Document();
+        doc.add(newField(FIELD, Integer.toString(i), Field.Store.YES, Field.Index.NOT_ANALYZED));
+        writer.addDocument(doc);
+      }
+      if(optimize)
+        writer.optimize();
+      writer.close();
+
+      BooleanQuery booleanQuery = new BooleanQuery();
+      booleanQuery.add(new TermQuery(new Term(FIELD, "36")), BooleanClause.Occur.SHOULD);
+     
+     
+      IndexSearcher indexSearcher = new IndexSearcher(directory, true);
+      filter.setDocBases(indexSearcher.getIndexReader());
+      ScoreDoc[] hits = indexSearcher.search(booleanQuery, filter, 1000).scoreDocs;
+      assertEquals("Number of matched documents", 1, hits.length);
+      indexSearcher.close();
+    }
+    catch (IOException e) {
+      fail(e.getMessage());
+    }
+    
+  }
+ 
+  public static final class SimpleDocIdSetFilter extends Filter {
+    private final int[] docs;
+    private int index;
+    private Map<IndexReader,Integer> docBasePerSub;
+
+    public SimpleDocIdSetFilter(int[] docs) {
+      this.docs = docs;
+    }
+
+    public void setDocBases(IndexReader r) {
+      int maxDoc = 0;
+      docBasePerSub = new HashMap<IndexReader,Integer>();
+      for(IndexReader sub : r.getSequentialSubReaders()) {
+        docBasePerSub.put(sub, maxDoc);
+        maxDoc += sub.maxDoc();
+      }
+    }
+
+    @Override
+    public DocIdSet getDocIdSet(IndexReader reader) {
+      final FixedBitSet set = new FixedBitSet(reader.maxDoc());
+      final int docBase = docBasePerSub.get(reader);
+      final int limit = docBase+reader.maxDoc();
+      for (;index < docs.length; index++) {
+        final int docId = docs[index];
+        if (docId > limit)
+          break;
+        if (docId >= docBase) {
+          set.set(docId-docBase);
+        }
+      }
+      return set.cardinality() == 0 ? null : set;
+    }
+    
+    public void reset(){
+      index = 0;
+    }
+  }
+
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/TestFuzzyQuery.java b/lucene/backwards/src/test/org/apache/lucene/search/TestFuzzyQuery.java
new file mode 100644
index 0000000..f43b9ed
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/TestFuzzyQuery.java
@@ -0,0 +1,390 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.List;
+import java.util.Arrays;
+import java.io.IOException;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.MultiReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.queryParser.QueryParser;
+
+/**
+ * Tests {@link FuzzyQuery}.
+ *
+ */
+public class TestFuzzyQuery extends LuceneTestCase {
+
+  public void testFuzziness() throws Exception {
+    Directory directory = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random, directory);
+    addDoc("aaaaa", writer);
+    addDoc("aaaab", writer);
+    addDoc("aaabb", writer);
+    addDoc("aabbb", writer);
+    addDoc("abbbb", writer);
+    addDoc("bbbbb", writer);
+    addDoc("ddddd", writer);
+
+    IndexReader reader = writer.getReader();
+    IndexSearcher searcher = newSearcher(reader);
+    writer.close();
+
+    FuzzyQuery query = new FuzzyQuery(new Term("field", "aaaaa"), FuzzyQuery.defaultMinSimilarity, 0);   
+    ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals(3, hits.length);
+    
+    // same with prefix
+    query = new FuzzyQuery(new Term("field", "aaaaa"), FuzzyQuery.defaultMinSimilarity, 1);   
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals(3, hits.length);
+    query = new FuzzyQuery(new Term("field", "aaaaa"), FuzzyQuery.defaultMinSimilarity, 2);   
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals(3, hits.length);
+    query = new FuzzyQuery(new Term("field", "aaaaa"), FuzzyQuery.defaultMinSimilarity, 3);   
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals(3, hits.length);
+    query = new FuzzyQuery(new Term("field", "aaaaa"), FuzzyQuery.defaultMinSimilarity, 4);   
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals(2, hits.length);
+    query = new FuzzyQuery(new Term("field", "aaaaa"), FuzzyQuery.defaultMinSimilarity, 5);   
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals(1, hits.length);
+    query = new FuzzyQuery(new Term("field", "aaaaa"), FuzzyQuery.defaultMinSimilarity, 6);   
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals(1, hits.length);
+    
+    // test scoring
+    query = new FuzzyQuery(new Term("field", "bbbbb"), FuzzyQuery.defaultMinSimilarity, 0);   
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals("3 documents should match", 3, hits.length);
+    List<String> order = Arrays.asList("bbbbb","abbbb","aabbb");
+    for (int i = 0; i < hits.length; i++) {
+      final String term = searcher.doc(hits[i].doc).get("field");
+      //System.out.println(hits[i].score);
+      assertEquals(order.get(i), term);
+    }
+
+    // test pq size by supplying maxExpansions=2
+    // This query would normally return 3 documents, because 3 terms match (see above):
+    query = new FuzzyQuery(new Term("field", "bbbbb"), FuzzyQuery.defaultMinSimilarity, 0, 2); 
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals("only 2 documents should match", 2, hits.length);
+    order = Arrays.asList("bbbbb","abbbb");
+    for (int i = 0; i < hits.length; i++) {
+      final String term = searcher.doc(hits[i].doc).get("field");
+      //System.out.println(hits[i].score);
+      assertEquals(order.get(i), term);
+    }
+
+    // not similar enough:
+    query = new FuzzyQuery(new Term("field", "xxxxx"), FuzzyQuery.defaultMinSimilarity, 0);  	
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals(0, hits.length);
+    query = new FuzzyQuery(new Term("field", "aaccc"), FuzzyQuery.defaultMinSimilarity, 0);   // edit distance to "aaaaa" = 3
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals(0, hits.length);
+
+    // query identical to a word in the index:
+    query = new FuzzyQuery(new Term("field", "aaaaa"), FuzzyQuery.defaultMinSimilarity, 0);   
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals(3, hits.length);
+    assertEquals(searcher.doc(hits[0].doc).get("field"), ("aaaaa"));
+    // default allows for up to two edits:
+    assertEquals(searcher.doc(hits[1].doc).get("field"), ("aaaab"));
+    assertEquals(searcher.doc(hits[2].doc).get("field"), ("aaabb"));
+
+    // query similar to a word in the index:
+    query = new FuzzyQuery(new Term("field", "aaaac"), FuzzyQuery.defaultMinSimilarity, 0);   
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals(3, hits.length);
+    assertEquals(searcher.doc(hits[0].doc).get("field"), ("aaaaa"));
+    assertEquals(searcher.doc(hits[1].doc).get("field"), ("aaaab"));
+    assertEquals(searcher.doc(hits[2].doc).get("field"), ("aaabb"));
+    
+    // now with prefix
+    query = new FuzzyQuery(new Term("field", "aaaac"), FuzzyQuery.defaultMinSimilarity, 1);   
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals(3, hits.length);
+    assertEquals(searcher.doc(hits[0].doc).get("field"), ("aaaaa"));
+    assertEquals(searcher.doc(hits[1].doc).get("field"), ("aaaab"));
+    assertEquals(searcher.doc(hits[2].doc).get("field"), ("aaabb"));
+    query = new FuzzyQuery(new Term("field", "aaaac"), FuzzyQuery.defaultMinSimilarity, 2);   
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals(3, hits.length);
+    assertEquals(searcher.doc(hits[0].doc).get("field"), ("aaaaa"));
+    assertEquals(searcher.doc(hits[1].doc).get("field"), ("aaaab"));
+    assertEquals(searcher.doc(hits[2].doc).get("field"), ("aaabb"));
+    query = new FuzzyQuery(new Term("field", "aaaac"), FuzzyQuery.defaultMinSimilarity, 3);   
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals(3, hits.length);
+    assertEquals(searcher.doc(hits[0].doc).get("field"), ("aaaaa"));
+    assertEquals(searcher.doc(hits[1].doc).get("field"), ("aaaab"));
+    assertEquals(searcher.doc(hits[2].doc).get("field"), ("aaabb"));
+    query = new FuzzyQuery(new Term("field", "aaaac"), FuzzyQuery.defaultMinSimilarity, 4);   
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals(2, hits.length);
+    assertEquals(searcher.doc(hits[0].doc).get("field"), ("aaaaa"));
+    assertEquals(searcher.doc(hits[1].doc).get("field"), ("aaaab"));
+    query = new FuzzyQuery(new Term("field", "aaaac"), FuzzyQuery.defaultMinSimilarity, 5);   
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals(0, hits.length);
+    
+
+    query = new FuzzyQuery(new Term("field", "ddddX"), FuzzyQuery.defaultMinSimilarity, 0);   
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals(1, hits.length);
+    assertEquals(searcher.doc(hits[0].doc).get("field"), ("ddddd"));
+    
+    // now with prefix
+    query = new FuzzyQuery(new Term("field", "ddddX"), FuzzyQuery.defaultMinSimilarity, 1);   
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals(1, hits.length);
+    assertEquals(searcher.doc(hits[0].doc).get("field"), ("ddddd"));
+    query = new FuzzyQuery(new Term("field", "ddddX"), FuzzyQuery.defaultMinSimilarity, 2);   
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals(1, hits.length);
+    assertEquals(searcher.doc(hits[0].doc).get("field"), ("ddddd"));
+    query = new FuzzyQuery(new Term("field", "ddddX"), FuzzyQuery.defaultMinSimilarity, 3);   
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals(1, hits.length);
+    assertEquals(searcher.doc(hits[0].doc).get("field"), ("ddddd"));
+    query = new FuzzyQuery(new Term("field", "ddddX"), FuzzyQuery.defaultMinSimilarity, 4);   
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals(1, hits.length);
+    assertEquals(searcher.doc(hits[0].doc).get("field"), ("ddddd"));
+    query = new FuzzyQuery(new Term("field", "ddddX"), FuzzyQuery.defaultMinSimilarity, 5);   
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals(0, hits.length);
+    
+
+    // different field = no match:
+    query = new FuzzyQuery(new Term("anotherfield", "ddddX"), FuzzyQuery.defaultMinSimilarity, 0);   
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals(0, hits.length);
+
+    searcher.close();
+    reader.close();
+    directory.close();
+  }
+
+  public void testFuzzinessLong() throws Exception {
+    Directory directory = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random, directory);
+    addDoc("aaaaaaa", writer);
+    addDoc("segment", writer);
+
+    IndexReader reader = writer.getReader();
+    IndexSearcher searcher = newSearcher(reader);
+    writer.close();
+
+    FuzzyQuery query;
+    // not similar enough:
+    query = new FuzzyQuery(new Term("field", "xxxxx"), FuzzyQuery.defaultMinSimilarity, 0);   
+    ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals(0, hits.length);
+    // edit distance to "aaaaaaa" = 3, this matches because the string is longer than
+    // in testDefaultFuzziness so a bigger difference is allowed:
+    query = new FuzzyQuery(new Term("field", "aaaaccc"), FuzzyQuery.defaultMinSimilarity, 0);   
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals(1, hits.length);
+    assertEquals(searcher.doc(hits[0].doc).get("field"), ("aaaaaaa"));
+    
+    // now with prefix
+    query = new FuzzyQuery(new Term("field", "aaaaccc"), FuzzyQuery.defaultMinSimilarity, 1);   
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals(1, hits.length);
+    assertEquals(searcher.doc(hits[0].doc).get("field"), ("aaaaaaa"));
+    query = new FuzzyQuery(new Term("field", "aaaaccc"), FuzzyQuery.defaultMinSimilarity, 4);   
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals(1, hits.length);
+    assertEquals(searcher.doc(hits[0].doc).get("field"), ("aaaaaaa"));
+    query = new FuzzyQuery(new Term("field", "aaaaccc"), FuzzyQuery.defaultMinSimilarity, 5);   
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals(0, hits.length);
+
+    // no match, more than half of the characters are wrong:
+    query = new FuzzyQuery(new Term("field", "aaacccc"), FuzzyQuery.defaultMinSimilarity, 0);   
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals(0, hits.length);
+    
+    // now with prefix
+    query = new FuzzyQuery(new Term("field", "aaacccc"), FuzzyQuery.defaultMinSimilarity, 2);   
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals(0, hits.length);
+
+    // "student" and "stellent" are indeed similar to "segment" by default:
+    query = new FuzzyQuery(new Term("field", "student"), FuzzyQuery.defaultMinSimilarity, 0);   
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals(1, hits.length);
+    query = new FuzzyQuery(new Term("field", "stellent"), FuzzyQuery.defaultMinSimilarity, 0);   
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals(1, hits.length);
+    
+    // now with prefix
+    query = new FuzzyQuery(new Term("field", "student"), FuzzyQuery.defaultMinSimilarity, 1);   
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals(1, hits.length);
+    query = new FuzzyQuery(new Term("field", "stellent"), FuzzyQuery.defaultMinSimilarity, 1);   
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals(1, hits.length);
+    query = new FuzzyQuery(new Term("field", "student"), FuzzyQuery.defaultMinSimilarity, 2);   
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals(0, hits.length);
+    query = new FuzzyQuery(new Term("field", "stellent"), FuzzyQuery.defaultMinSimilarity, 2);   
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals(0, hits.length);
+    
+    // "student" doesn't match anymore thanks to increased minimum similarity:
+    query = new FuzzyQuery(new Term("field", "student"), 0.6f, 0);   
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals(0, hits.length);
+
+    try {
+      query = new FuzzyQuery(new Term("field", "student"), 1.1f);
+      fail("Expected IllegalArgumentException");
+    } catch (IllegalArgumentException e) {
+      // expecting exception
+    }
+    try {
+      query = new FuzzyQuery(new Term("field", "student"), -0.1f);
+      fail("Expected IllegalArgumentException");
+    } catch (IllegalArgumentException e) {
+      // expecting exception
+    }
+
+    searcher.close();
+    reader.close();
+    directory.close();
+  }
+
+  public void testTokenLengthOpt() throws IOException {
+    Directory directory = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random, directory);
+    addDoc("12345678911", writer);
+    addDoc("segment", writer);
+
+    IndexReader reader = writer.getReader();
+    IndexSearcher searcher = newSearcher(reader);
+    writer.close();
+
+    Query query;
+    // term not over 10 chars, so optimization shortcuts
+    query = new FuzzyQuery(new Term("field", "1234569"), 0.9f);
+    ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals(0, hits.length);
+
+    // 10 chars, so no optimization
+    query = new FuzzyQuery(new Term("field", "1234567891"), 0.9f);
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals(0, hits.length);
+    
+    // over 10 chars, so no optimization
+    query = new FuzzyQuery(new Term("field", "12345678911"), 0.9f);
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals(1, hits.length);
+
+    // over 10 chars, no match
+    query = new FuzzyQuery(new Term("field", "sdfsdfsdfsdf"), 0.9f);
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals(0, hits.length);
+    
+    searcher.close();
+    reader.close();
+    directory.close();
+  }
+  
+  /** Test the TopTermsBoostOnlyBooleanQueryRewrite rewrite method. */
+  public void testBoostOnlyRewrite() throws Exception {
+    Directory directory = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random, directory);
+    addDoc("Lucene", writer);
+    addDoc("Lucene", writer);
+    addDoc("Lucenne", writer);
+
+    IndexReader reader = writer.getReader();
+    IndexSearcher searcher = newSearcher(reader);
+    writer.close();
+    
+    FuzzyQuery query = new FuzzyQuery(new Term("field", "Lucene"));
+    query.setRewriteMethod(new MultiTermQuery.TopTermsBoostOnlyBooleanQueryRewrite(50));
+    ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals(3, hits.length);
+    // normally, 'Lucenne' would be the first result as IDF will skew the score.
+    assertEquals("Lucene", reader.document(hits[0].doc).get("field"));
+    assertEquals("Lucene", reader.document(hits[1].doc).get("field"));
+    assertEquals("Lucenne", reader.document(hits[2].doc).get("field"));
+    searcher.close();
+    reader.close();
+    directory.close();
+  }
+  
+  public void testGiga() throws Exception {
+
+    MockAnalyzer analyzer = new MockAnalyzer(random);
+    Directory index = newDirectory();
+    RandomIndexWriter w = new RandomIndexWriter(random, index);
+
+    addDoc("Lucene in Action", w);
+    addDoc("Lucene for Dummies", w);
+
+    //addDoc("Giga", w);
+    addDoc("Giga byte", w);
+
+    addDoc("ManagingGigabytesManagingGigabyte", w);
+    addDoc("ManagingGigabytesManagingGigabytes", w);
+
+    addDoc("The Art of Computer Science", w);
+    addDoc("J. K. Rowling", w);
+    addDoc("JK Rowling", w);
+    addDoc("Joanne K Roling", w);
+    addDoc("Bruce Willis", w);
+    addDoc("Willis bruce", w);
+    addDoc("Brute willis", w);
+    addDoc("B. willis", w);
+    IndexReader r = w.getReader();
+    w.close();
+
+    Query q = new QueryParser(TEST_VERSION_CURRENT, "field", analyzer).parse( "giga~0.9" );
+
+    // search
+    IndexSearcher searcher = newSearcher(r);
+    ScoreDoc[] hits = searcher.search(q, 10).scoreDocs;
+    assertEquals(1, hits.length);
+    assertEquals("Giga byte", searcher.doc(hits[0].doc).get("field"));
+    searcher.close();
+    r.close();
+    index.close();
+  }
+
+  private void addDoc(String text, RandomIndexWriter writer) throws IOException {
+    Document doc = new Document();
+    doc.add(newField("field", text, Field.Store.YES, Field.Index.ANALYZED));
+    writer.addDocument(doc);
+  }
+
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/TestLocaleMethods.java b/lucene/backwards/src/test/org/apache/lucene/search/TestLocaleMethods.java
new file mode 100644
index 0000000..591ee13
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/TestLocaleMethods.java
@@ -0,0 +1,144 @@
+package org.apache.lucene.search;
+
+import java.io.IOException;
+import java.text.Collator;
+import java.util.Locale;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.search.BooleanClause.Occur;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util._TestUtil;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Tests Locale-based sort and range search
+ */
+public class TestLocaleMethods extends LuceneTestCase {
+  private static Locale locale;
+  private static Collator collator;
+  private static IndexSearcher searcher;
+  private static IndexReader reader;
+  private static Directory dir;
+  private static int numDocs;
+
+  @BeforeClass
+  public static void beforeClass() throws Exception {
+    locale = LuceneTestCase.randomLocale(random);
+    collator = Collator.getInstance(locale);
+    numDocs = 1000 * RANDOM_MULTIPLIER;
+    dir = newDirectory();
+    RandomIndexWriter iw = new RandomIndexWriter(random, dir);
+    for (int i = 0; i < numDocs; i++) {
+      Document doc = new Document();
+      String value = _TestUtil.randomUnicodeString(random);
+      Field field = newField("field", value, Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS);
+      doc.add(field);
+      iw.addDocument(doc);
+    }
+    reader = iw.getReader();
+    iw.close();
+
+    searcher = newSearcher(reader);
+  }
+  
+  @AfterClass
+  public static void afterClass() throws Exception {
+    searcher.close();
+    reader.close();
+    dir.close();
+    locale = null;
+    collator = null;
+    searcher = null;
+    reader = null;
+    dir = null;
+  }
+  
+  public void testSort() throws Exception {
+    SortField sf = new SortField("field", locale);
+    TopFieldDocs docs = searcher.search(new MatchAllDocsQuery(), null, numDocs, new Sort(sf));
+    String prev = "";
+    for (ScoreDoc doc : docs.scoreDocs) {
+      String value = reader.document(doc.doc).get("field");
+      assertTrue(collator.compare(value, prev) >= 0);
+      prev = value;
+    }
+  }
+  
+  public void testSort2() throws Exception {
+    SortField sf = new SortField("field", new FieldComparatorSource() {
+      @Override
+      public FieldComparator newComparator(String fieldname, int numHits, int sortPos, boolean reversed) throws IOException {
+        return new FieldComparator.StringComparatorLocale(numHits, fieldname, locale);
+      }
+    });
+    TopFieldDocs docs = searcher.search(new MatchAllDocsQuery(), null, numDocs, new Sort(sf));
+    String prev = "";
+    for (ScoreDoc doc : docs.scoreDocs) {
+      String value = reader.document(doc.doc).get("field");
+      assertTrue(collator.compare(value, prev) >= 0);
+      prev = value;
+    }
+  }
+  
+  private void doTestRanges(String startPoint, String endPoint, Query query) throws Exception {
+    // positive test
+    TopDocs docs = searcher.search(query, numDocs);
+    for (ScoreDoc doc : docs.scoreDocs) {
+      String value = reader.document(doc.doc).get("field");
+      assertTrue(collator.compare(value, startPoint) >= 0);
+      assertTrue(collator.compare(value, endPoint) <= 0);
+    }
+    
+    // negative test
+    BooleanQuery bq = new BooleanQuery();
+    bq.add(new MatchAllDocsQuery(), Occur.SHOULD);
+    bq.add(query, Occur.MUST_NOT);
+    docs = searcher.search(bq, numDocs);
+    for (ScoreDoc doc : docs.scoreDocs) {
+      String value = reader.document(doc.doc).get("field");
+      assertTrue(collator.compare(value, startPoint) < 0 || collator.compare(value, endPoint) > 0);
+    }
+  }
+  
+  public void testRangeQuery() throws Exception {
+    int numQueries = 100*RANDOM_MULTIPLIER;
+    for (int i = 0; i < numQueries; i++) {
+      String startPoint = _TestUtil.randomUnicodeString(random);
+      String endPoint = _TestUtil.randomUnicodeString(random);
+      Query query = new TermRangeQuery("field", startPoint, endPoint, true, true, collator);
+      doTestRanges(startPoint, endPoint, query);
+    }
+  }
+  
+  public void testRangeFilter() throws Exception {
+    int numQueries = 100*RANDOM_MULTIPLIER;
+    for (int i = 0; i < numQueries; i++) {
+      String startPoint = _TestUtil.randomUnicodeString(random);
+      String endPoint = _TestUtil.randomUnicodeString(random);
+      Query query = new ConstantScoreQuery(new TermRangeFilter("field", startPoint, endPoint, true, true, collator));
+      doTestRanges(startPoint, endPoint, query);
+    }
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/TestMatchAllDocsQuery.java b/lucene/backwards/src/test/org/apache/lucene/search/TestMatchAllDocsQuery.java
new file mode 100644
index 0000000..f938bb0
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/TestMatchAllDocsQuery.java
@@ -0,0 +1,133 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.search;
+
+import java.io.IOException;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.queryParser.QueryParser;
+import org.apache.lucene.store.Directory;
+
+import org.apache.lucene.util.LuceneTestCase;
+
+/**
+ * Tests MatchAllDocsQuery.
+ *
+ */
+public class TestMatchAllDocsQuery extends LuceneTestCase {
+  private Analyzer analyzer = new MockAnalyzer(random);
+  
+  public void testQuery() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(
+                                                               TEST_VERSION_CURRENT, analyzer).setMaxBufferedDocs(2).setMergePolicy(newLogMergePolicy()));
+    addDoc("one", iw, 1f);
+    addDoc("two", iw, 20f);
+    addDoc("three four", iw, 300f);
+    iw.close();
+
+    IndexReader ir = IndexReader.open(dir, false);
+    IndexSearcher is = newSearcher(ir);
+    ScoreDoc[] hits;
+
+    // assert with norms scoring turned off
+
+    hits = is.search(new MatchAllDocsQuery(), null, 1000).scoreDocs;
+    assertEquals(3, hits.length);
+    assertEquals("one", is.doc(hits[0].doc).get("key"));
+    assertEquals("two", is.doc(hits[1].doc).get("key"));
+    assertEquals("three four", is.doc(hits[2].doc).get("key"));
+
+    // assert with norms scoring turned on
+
+    MatchAllDocsQuery normsQuery = new MatchAllDocsQuery("key");
+    hits = is.search(normsQuery, null, 1000).scoreDocs;
+    assertEquals(3, hits.length);
+
+    assertEquals("three four", is.doc(hits[0].doc).get("key"));    
+    assertEquals("two", is.doc(hits[1].doc).get("key"));
+    assertEquals("one", is.doc(hits[2].doc).get("key"));
+
+    // change norm & retest
+    is.getIndexReader().setNorm(0, "key", is.getSimilarity().encodeNormValue(400f));
+    normsQuery = new MatchAllDocsQuery("key");
+    hits = is.search(normsQuery, null, 1000).scoreDocs;
+    assertEquals(3, hits.length);
+
+    assertEquals("one", is.doc(hits[0].doc).get("key"));
+    assertEquals("three four", is.doc(hits[1].doc).get("key"));    
+    assertEquals("two", is.doc(hits[2].doc).get("key"));
+    
+    // some artificial queries to trigger the use of skipTo():
+    
+    BooleanQuery bq = new BooleanQuery();
+    bq.add(new MatchAllDocsQuery(), BooleanClause.Occur.MUST);
+    bq.add(new MatchAllDocsQuery(), BooleanClause.Occur.MUST);
+    hits = is.search(bq, null, 1000).scoreDocs;
+    assertEquals(3, hits.length);
+
+    bq = new BooleanQuery();
+    bq.add(new MatchAllDocsQuery(), BooleanClause.Occur.MUST);
+    bq.add(new TermQuery(new Term("key", "three")), BooleanClause.Occur.MUST);
+    hits = is.search(bq, null, 1000).scoreDocs;
+    assertEquals(1, hits.length);
+
+    // delete a document:
+    is.getIndexReader().deleteDocument(0);
+    hits = is.search(new MatchAllDocsQuery(), null, 1000).scoreDocs;
+    assertEquals(2, hits.length);
+    
+    // test parsable toString()
+    QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "key", analyzer);
+    hits = is.search(qp.parse(new MatchAllDocsQuery().toString()), null, 1000).scoreDocs;
+    assertEquals(2, hits.length);
+
+    // test parsable toString() with non default boost
+    Query maq = new MatchAllDocsQuery();
+    maq.setBoost(2.3f);
+    Query pq = qp.parse(maq.toString());
+    hits = is.search(pq, null, 1000).scoreDocs;
+    assertEquals(2, hits.length);
+    
+    is.close();
+    ir.close();
+    dir.close();
+  }
+
+  public void testEquals() {
+    Query q1 = new MatchAllDocsQuery();
+    Query q2 = new MatchAllDocsQuery();
+    assertTrue(q1.equals(q2));
+    q1.setBoost(1.5f);
+    assertFalse(q1.equals(q2));
+  }
+  
+  private void addDoc(String text, IndexWriter iw, float boost) throws IOException {
+    Document doc = new Document();
+    Field f = newField("key", text, Field.Store.YES, Field.Index.ANALYZED);
+    f.setBoost(boost);
+    doc.add(f);
+    iw.addDocument(doc);
+  }
+
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java b/lucene/backwards/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java
new file mode 100644
index 0000000..892c7e7
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java
@@ -0,0 +1,588 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.TermEnum;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.queryParser.ParseException;
+import org.apache.lucene.queryParser.QueryParser;
+import org.apache.lucene.search.Explanation.IDFExplanation;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.SimpleAnalyzer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
+import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.LuceneTestCase;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.LinkedList;
+import java.io.Reader;
+
+/**
+ * This class tests the MultiPhraseQuery class.
+ * 
+ * 
+ */
+public class TestMultiPhraseQuery extends LuceneTestCase {
+  
+  public void testPhrasePrefix() throws IOException {
+    Directory indexStore = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random, indexStore);
+    add("blueberry pie", writer);
+    add("blueberry strudel", writer);
+    add("blueberry pizza", writer);
+    add("blueberry chewing gum", writer);
+    add("bluebird pizza", writer);
+    add("bluebird foobar pizza", writer);
+    add("piccadilly circus", writer);
+    
+    IndexReader reader = writer.getReader();
+    IndexSearcher searcher = newSearcher(reader);
+    
+    // search for "blueberry pi*":
+    MultiPhraseQuery query1 = new MultiPhraseQuery();
+    // search for "strawberry pi*":
+    MultiPhraseQuery query2 = new MultiPhraseQuery();
+    query1.add(new Term("body", "blueberry"));
+    query2.add(new Term("body", "strawberry"));
+
+    LinkedList<Term> termsWithPrefix = new LinkedList<Term>();
+    IndexReader ir = reader;
+
+    // this TermEnum gives "piccadilly", "pie" and "pizza".
+    String prefix = "pi";
+    TermEnum te = ir.terms(new Term("body", prefix));
+    do {
+        if (te.term().text().startsWith(prefix))
+        {
+            termsWithPrefix.add(te.term());
+        }
+    } while (te.next());
+
+    query1.add(termsWithPrefix.toArray(new Term[0]));
+    assertEquals("body:\"blueberry (piccadilly pie pizza)\"", query1.toString());
+    query2.add(termsWithPrefix.toArray(new Term[0]));
+    assertEquals("body:\"strawberry (piccadilly pie pizza)\"", query2.toString());
+
+    ScoreDoc[] result;
+    result = searcher.search(query1, null, 1000).scoreDocs;
+    assertEquals(2, result.length);
+    result = searcher.search(query2, null, 1000).scoreDocs;
+    assertEquals(0, result.length);
+
+    // search for "blue* pizza":
+    MultiPhraseQuery query3 = new MultiPhraseQuery();
+    termsWithPrefix.clear();
+    prefix = "blue";
+    te = ir.terms(new Term("body", prefix));
+    do {
+        if (te.term().text().startsWith(prefix))
+        {
+            termsWithPrefix.add(te.term());
+        }
+    } while (te.next());
+    query3.add(termsWithPrefix.toArray(new Term[0]));
+    query3.add(new Term("body", "pizza"));
+
+    result = searcher.search(query3, null, 1000).scoreDocs;
+    assertEquals(2, result.length); // blueberry pizza, bluebird pizza
+    assertEquals("body:\"(blueberry bluebird) pizza\"", query3.toString());
+
+    // test slop:
+    query3.setSlop(1);
+    result = searcher.search(query3, null, 1000).scoreDocs;
+
+    // just make sure no exc:
+    searcher.explain(query3, 0);
+
+    assertEquals(3, result.length); // blueberry pizza, bluebird pizza, bluebird foobar pizza
+
+    MultiPhraseQuery query4 = new MultiPhraseQuery();
+    try {
+      query4.add(new Term("field1", "foo"));
+      query4.add(new Term("field2", "foobar"));
+      fail();
+    } catch(IllegalArgumentException e) {
+      // okay, all terms must belong to the same field
+    }
+    
+    writer.close();
+    searcher.close();
+    reader.close();
+    indexStore.close();
+  }
+
+  // LUCENE-2580
+  public void testTall() throws IOException {
+    Directory indexStore = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random, indexStore);
+    add("blueberry chocolate pie", writer);
+    add("blueberry chocolate tart", writer);
+    IndexReader r = writer.getReader();
+    writer.close();
+
+    IndexSearcher searcher = newSearcher(r);
+    MultiPhraseQuery q = new MultiPhraseQuery();
+    q.add(new Term("body", "blueberry"));
+    q.add(new Term("body", "chocolate"));
+    q.add(new Term[] {new Term("body", "pie"), new Term("body", "tart")});
+    assertEquals(2, searcher.search(q, 1).totalHits);
+    searcher.close();
+    r.close();
+    indexStore.close();
+  }
+  
+  private void add(String s, RandomIndexWriter writer) throws IOException {
+    Document doc = new Document();
+    doc.add(newField("body", s, Field.Store.YES, Field.Index.ANALYZED));
+    writer.addDocument(doc);
+  }
+  
+  public void testBooleanQueryContainingSingleTermPrefixQuery()
+      throws IOException {
+    // this tests against bug 33161 (now fixed)
+    // In order to cause the bug, the outer query must have more than one term
+    // and all terms required.
+    // The contained MultiPhraseQuery must contain exactly one term array.
+    Directory indexStore = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random, indexStore);
+    add("blueberry pie", writer);
+    add("blueberry chewing gum", writer);
+    add("blue raspberry pie", writer);
+    
+    IndexReader reader = writer.getReader();
+    IndexSearcher searcher = newSearcher(reader);
+    // This query will be equivalent to +body:pie +body:"blue*"
+    BooleanQuery q = new BooleanQuery();
+    q.add(new TermQuery(new Term("body", "pie")), BooleanClause.Occur.MUST);
+    
+    MultiPhraseQuery trouble = new MultiPhraseQuery();
+    trouble.add(new Term[] {new Term("body", "blueberry"),
+        new Term("body", "blue")});
+    q.add(trouble, BooleanClause.Occur.MUST);
+    
+    // exception will be thrown here without fix
+    ScoreDoc[] hits = searcher.search(q, null, 1000).scoreDocs;
+    
+    assertEquals("Wrong number of hits", 2, hits.length);
+    
+    // just make sure no exc:
+    searcher.explain(q, 0);
+    
+    writer.close();
+    searcher.close();
+    reader.close();
+    indexStore.close();
+  }
+  
+  public void testPhrasePrefixWithBooleanQuery() throws IOException {
+    Directory indexStore = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random, indexStore);
+    add("This is a test", "object", writer);
+    add("a note", "note", writer);
+    
+    IndexReader reader = writer.getReader();
+    IndexSearcher searcher = newSearcher(reader);
+    
+    // This query will be equivalent to +type:note +body:"a t*"
+    BooleanQuery q = new BooleanQuery();
+    q.add(new TermQuery(new Term("type", "note")), BooleanClause.Occur.MUST);
+    
+    MultiPhraseQuery trouble = new MultiPhraseQuery();
+    trouble.add(new Term("body", "a"));
+    trouble
+        .add(new Term[] {new Term("body", "test"), new Term("body", "this")});
+    q.add(trouble, BooleanClause.Occur.MUST);
+    
+    // exception will be thrown here without fix for #35626:
+    ScoreDoc[] hits = searcher.search(q, null, 1000).scoreDocs;
+    assertEquals("Wrong number of hits", 0, hits.length);
+    writer.close();
+    searcher.close();
+    reader.close();
+    indexStore.close();
+  }
+  
+  public void testNoDocs() throws Exception {
+    Directory indexStore = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random, indexStore);
+    add("a note", "note", writer);
+    
+    IndexReader reader = writer.getReader();
+    IndexSearcher searcher = newSearcher(reader);
+    
+    MultiPhraseQuery q = new MultiPhraseQuery();
+    q.add(new Term("body", "a"));
+    q.add(new Term[] {new Term("body", "nope"), new Term("body", "nope")});
+    assertEquals("Wrong number of hits", 0,
+        searcher.search(q, null, 1).totalHits);
+    
+    // just make sure no exc:
+    searcher.explain(q, 0);
+    
+    writer.close();
+    searcher.close();
+    reader.close();
+    indexStore.close();
+  }
+  
+  public void testHashCodeAndEquals() {
+    MultiPhraseQuery query1 = new MultiPhraseQuery();
+    MultiPhraseQuery query2 = new MultiPhraseQuery();
+    
+    assertEquals(query1.hashCode(), query2.hashCode());
+    assertEquals(query1, query2);
+    
+    Term term1 = new Term("someField", "someText");
+    
+    query1.add(term1);
+    query2.add(term1);
+    
+    assertEquals(query1.hashCode(), query2.hashCode());
+    assertEquals(query1, query2);
+    
+    Term term2 = new Term("someField", "someMoreText");
+    
+    query1.add(term2);
+    
+    assertFalse(query1.hashCode() == query2.hashCode());
+    assertFalse(query1.equals(query2));
+    
+    query2.add(term2);
+    
+    assertEquals(query1.hashCode(), query2.hashCode());
+    assertEquals(query1, query2);
+  }
+  
+  private void add(String s, String type, RandomIndexWriter writer)
+      throws IOException {
+    Document doc = new Document();
+    doc.add(newField("body", s, Field.Store.YES, Field.Index.ANALYZED));
+    doc.add(newField("type", type, Field.Store.YES, Field.Index.NOT_ANALYZED));
+    writer.addDocument(doc);
+  }
+  
+  // LUCENE-2526
+  public void testEmptyToString() {
+    new MultiPhraseQuery().toString();
+  }
+  
+  public void testCustomIDF() throws Exception {
+    Directory indexStore = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random, indexStore);
+    add("This is a test", "object", writer);
+    add("a note", "note", writer);
+    
+    IndexReader reader = writer.getReader();
+    IndexSearcher searcher = newSearcher(reader);
+    searcher.setSimilarity(new DefaultSimilarity() {
+      
+      @Override
+      public IDFExplanation idfExplain(Collection<Term> terms,
+          Searcher searcher) throws IOException {
+        return new IDFExplanation() {
+
+          @Override
+          public float getIdf() {
+            return 10f;
+          }
+
+          @Override
+          public String explain() {
+            return "just a test";
+          }
+          
+        };
+      }   
+    });
+    
+    MultiPhraseQuery query = new MultiPhraseQuery();
+    query.add(new Term[] { new Term("body", "this"), new Term("body", "that") });
+    query.add(new Term("body", "is"));
+    Weight weight = query.createWeight(searcher);
+    assertEquals(10f * 10f, weight.sumOfSquaredWeights(), 0.001f);
+
+    writer.close();
+    searcher.close();
+    reader.close();
+    indexStore.close();
+  }
+
+  private static class TokenAndPos {
+    public final String token;
+    public final int pos;
+    public TokenAndPos(String token, int pos) {
+      this.token = token;
+      this.pos = pos;
+    }
+  }
+
+  private static class CannedAnalyzer extends Analyzer {
+    private final TokenAndPos[] tokens;
+    
+    public CannedAnalyzer(TokenAndPos[] tokens) {
+      this.tokens = tokens;
+    }
+
+    @Override
+    public TokenStream tokenStream(String fieldName, Reader reader) {
+      return new CannedTokenizer(tokens);
+    }
+  }
+
+  private static class CannedTokenizer extends Tokenizer {
+    private final TokenAndPos[] tokens;
+    private int upto = 0;
+    private int lastPos = 0;
+    private final TermAttribute termAtt = addAttribute(TermAttribute.class);
+    private final PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class);
+
+    public CannedTokenizer(TokenAndPos[] tokens) {
+      this.tokens = tokens;
+    }
+
+    @Override
+    public final boolean incrementToken() throws IOException {
+      clearAttributes();      
+      if (upto < tokens.length) {
+        final TokenAndPos token = tokens[upto++];
+        termAtt.setTermBuffer(token.token);
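+        // TokenAndPos carries absolute positions, so convert to a position
+        // increment relative to the previously emitted token.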
+        posIncrAtt.setPositionIncrement(token.pos - lastPos);
+        lastPos = token.pos;
+        return true;
+      } else {
+        return false;
+      }
+    }
+
+    @Override
+    public void reset() throws IOException {
+      super.reset();
+      this.upto = 0;
+      this.lastPos = 0;
+    }
+  }
+
+  public void testZeroPosIncr() throws IOException {
+    Directory dir = new RAMDirectory();
+    final TokenAndPos[] tokens = new TokenAndPos[3];
+    tokens[0] = new TokenAndPos("a", 0);
+    tokens[1] = new TokenAndPos("b", 0);
+    tokens[2] = new TokenAndPos("c", 0);
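+    // All three tokens are indexed at position 0, so the two MPQ clause orderings
+    // exercised below describe the same single-position phrase.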
+
+    IndexWriter writer = new IndexWriter(dir, new CannedAnalyzer(tokens), true, IndexWriter.MaxFieldLength.LIMITED);
+    Document doc = new Document();
+    doc.add(new Field("field", "", Field.Store.NO, Field.Index.ANALYZED));
+    writer.addDocument(doc);
+    writer.addDocument(doc);
+    IndexReader r = writer.getReader();
+    writer.close();
+    IndexSearcher s = new IndexSearcher(r);
+    MultiPhraseQuery mpq = new MultiPhraseQuery();
+    //mpq.setSlop(1);
+
+    // NOTE: not great that if we do the else clause here we
+    // get different scores!  MultiPhraseQuery counts that
+    // phrase as occurring twice per doc (it should be 1, I
+    // think?).  This is because MultipleTermPositions is able to
+    // return the same position more than once (0, in this
+    // case):
+    if (true) {
+      mpq.add(new Term[] {new Term("field", "b"), new Term("field", "c")}, 0);
+      mpq.add(new Term[] {new Term("field", "a")}, 0);
+    } else {
+      mpq.add(new Term[] {new Term("field", "a")}, 0);
+      mpq.add(new Term[] {new Term("field", "b"), new Term("field", "c")}, 0);
+    }
+    TopDocs hits = s.search(mpq, 2);
+    assertEquals(2, hits.totalHits);
+    assertEquals(hits.scoreDocs[0].score, hits.scoreDocs[1].score, 1e-5);
+    /*
+    for(int hit=0;hit<hits.totalHits;hit++) {
+      ScoreDoc sd = hits.scoreDocs[hit];
+      System.out.println("  hit doc=" + sd.doc + " score=" + sd.score);
+    }
+    */
+    r.close();
+    dir.close();
+  }
+
+  private final static TokenAndPos[] INCR_0_DOC_TOKENS = new TokenAndPos[] {
+      new TokenAndPos("x", 0),
+      new TokenAndPos("a", 1),
+      new TokenAndPos("1", 1),
+      new TokenAndPos("m", 2), // not existing, relying on slop=2
+      new TokenAndPos("b", 3),
+      new TokenAndPos("1", 3),
+      new TokenAndPos("n", 4), // not existing, relying on slop=2
+      new TokenAndPos("c", 5),
+      new TokenAndPos("y", 6)
+  };
+  
+  private final static TokenAndPos[] INCR_0_QUERY_TOKENS_AND = new TokenAndPos[] {
+      new TokenAndPos("a", 0),
+      new TokenAndPos("1", 0),
+      new TokenAndPos("b", 1),
+      new TokenAndPos("1", 1),
+      new TokenAndPos("c", 2)
+  };
+  
+  private final static TokenAndPos[][] INCR_0_QUERY_TOKENS_AND_OR_MATCH = new TokenAndPos[][] {
+      { new TokenAndPos("a", 0) },
+      { new TokenAndPos("x", 0), new TokenAndPos("1", 0) },
+      { new TokenAndPos("b", 1) },
+      { new TokenAndPos("x", 1), new TokenAndPos("1", 1) },
+      { new TokenAndPos("c", 2) }
+  };
+  
+  private final static TokenAndPos[][] INCR_0_QUERY_TOKENS_AND_OR_NO_MATCHN = new TokenAndPos[][] {
+      { new TokenAndPos("x", 0) },
+      { new TokenAndPos("a", 0), new TokenAndPos("1", 0) },
+      { new TokenAndPos("x", 1) },
+      { new TokenAndPos("b", 1), new TokenAndPos("1", 1) },
+      { new TokenAndPos("c", 2) }
+  };
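+  // Derived from the arrays above: the document places the extra tokens "m" (position 2)
+  // and "n" (position 4) between the terms that the AND query expects at consecutive
+  // positions, so the queries built from INCR_0_QUERY_TOKENS_AND should only match once
+  // a slop of 2 is allowed - which is what the sloppy tests below assert.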
+  
+  /**
+   * Using the query parser, an MPQ is created that is not strict about having all
+   * query terms at each position - one term per position is sufficient (OR logic).
+   */
+  public void testZeroPosIncrSloppyParsedAnd() throws IOException, ParseException {
+    QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new CannedAnalyzer(INCR_0_QUERY_TOKENS_AND));
+    final Query q = qp.parse("\"this text is actually ignored\"");
+    assertTrue("wrong query type!", q instanceof MultiPhraseQuery);
+    doTestZeroPosIncrSloppy(q, 0);
+    ((MultiPhraseQuery) q).setSlop(1);
+    doTestZeroPosIncrSloppy(q, 0);
+    ((MultiPhraseQuery) q).setSlop(2);
+    doTestZeroPosIncrSloppy(q, 1);
+  }
+  
+  private void doTestZeroPosIncrSloppy(Query q, int nExpected) throws IOException {
+    Directory dir = newDirectory(); // random dir
+    IndexWriterConfig cfg = newIndexWriterConfig(TEST_VERSION_CURRENT, new CannedAnalyzer(INCR_0_DOC_TOKENS));
+    IndexWriter writer = new IndexWriter(dir, cfg);
+    Document doc = new Document();
+    doc.add(new Field("field", "", Field.Store.NO, Field.Index.ANALYZED));
+    writer.addDocument(doc);
+    IndexReader r = IndexReader.open(writer,false);
+    writer.close();
+    IndexSearcher s = new IndexSearcher(r);
+    
+    if (VERBOSE) {
+      System.out.println("QUERY=" + q);
+    }
+    
+    TopDocs hits = s.search(q, 1);
+    assertEquals("wrong number of results", nExpected, hits.totalHits);
+    
+    if (VERBOSE) {
+      for(int hit=0;hit<hits.totalHits;hit++) {
+        ScoreDoc sd = hits.scoreDocs[hit];
+        System.out.println("  hit doc=" + sd.doc + " score=" + sd.score);
+      }
+    }
+    
+    r.close();
+    dir.close();
+  }
+
+  /**
+   * PQ AND Mode - Manually creating a phrase query
+   */
+  public void testZeroPosIncrSloppyPqAnd() throws IOException, ParseException {
+    final PhraseQuery pq = new PhraseQuery();
+    for (TokenAndPos tap : INCR_0_QUERY_TOKENS_AND) {
+      pq.add(new Term("field",tap.token), tap.pos);
+    }
+    doTestZeroPosIncrSloppy(pq, 0);
+    pq.setSlop(1);
+    doTestZeroPosIncrSloppy(pq, 0);
+    pq.setSlop(2);
+    doTestZeroPosIncrSloppy(pq, 1);
+  }
+
+  /**
+   * MPQ AND Mode - Manually creating a multiple phrase query
+   */
+  public void testZeroPosIncrSloppyMpqAnd() throws IOException, ParseException {
+    final MultiPhraseQuery mpq = new MultiPhraseQuery();
+    for (TokenAndPos tap : INCR_0_QUERY_TOKENS_AND) {
+      mpq.add(new Term[]{new Term("field",tap.token)}, tap.pos); //AND logic
+    }
+    doTestZeroPosIncrSloppy(mpq, 0);
+    mpq.setSlop(1);
+    doTestZeroPosIncrSloppy(mpq, 0);
+    mpq.setSlop(2);
+    doTestZeroPosIncrSloppy(mpq, 1);
+  }
+
+  /**
+   * MPQ Combined AND OR Mode - Manually creating a multiple phrase query
+   */
+  public void testZeroPosIncrSloppyMpqAndOrMatch() throws IOException, ParseException {
+    final MultiPhraseQuery mpq = new MultiPhraseQuery();
+    for (TokenAndPos tap[] : INCR_0_QUERY_TOKENS_AND_OR_MATCH) {
+      Term[] terms = tapTerms(tap);
+      final int pos = tap[0].pos;
+      mpq.add(terms, pos); //AND logic in pos, OR across lines 
+    }
+    doTestZeroPosIncrSloppy(mpq, 0);
+    mpq.setSlop(1);
+    doTestZeroPosIncrSloppy(mpq, 0);
+    mpq.setSlop(2);
+    doTestZeroPosIncrSloppy(mpq, 1);
+  }
+
+  /**
+   * MPQ Combined AND OR Mode - Manually creating a multiple phrase query - with no match
+   */
+  public void testZeroPosIncrSloppyMpqAndOrNoMatch() throws IOException, ParseException {
+    final MultiPhraseQuery mpq = new MultiPhraseQuery();
+    for (TokenAndPos tap[] : INCR_0_QUERY_TOKENS_AND_OR_NO_MATCHN) {
+      Term[] terms = tapTerms(tap);
+      final int pos = tap[0].pos;
+      mpq.add(terms, pos); //AND logic in pos, OR across lines 
+    }
+    doTestZeroPosIncrSloppy(mpq, 0);
+    mpq.setSlop(2);
+    doTestZeroPosIncrSloppy(mpq, 0);
+  }
+
+  private Term[] tapTerms(TokenAndPos[] tap) {
+    Term[] terms = new Term[tap.length];
+    for (int i=0; i<terms.length; i++) {
+      terms[i] = new Term("field",tap[i].token);
+    }
+    return terms;
+  }
+  
+}
+
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/TestMultiSearcher.java b/lucene/backwards/src/test/org/apache/lucene/search/TestMultiSearcher.java
new file mode 100644
index 0000000..c5b4546
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/TestMultiSearcher.java
@@ -0,0 +1,457 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.analysis.KeywordAnalyzer;
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.SetBasedFieldSelector;
+
+import org.apache.lucene.index.FieldInvertState;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.queryParser.QueryParser;
+import org.apache.lucene.store.Directory;
+import java.io.IOException;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Random;
+import java.util.Set;
+
+/**
+ * Tests {@link MultiSearcher} class.
+ */
+public class TestMultiSearcher extends LuceneTestCase
+{
+
+	/**
+	 * Returns a new instance of the concrete MultiSearcher class
+	 * used in this test.
+	 */
+	protected MultiSearcher getMultiSearcherInstance(Searcher[] searchers) throws IOException {
+		return new MultiSearcher(searchers);
+	}
+
+    public void testEmptyIndex() throws Exception {
+        // creating two directories for indices
+        Directory indexStoreA = newDirectory();
+        Directory indexStoreB = newDirectory();
+
+        // creating a document to store
+        Document lDoc = new Document();
+        lDoc.add(newField("fulltext", "Once upon a time.....", Field.Store.YES, Field.Index.ANALYZED));
+        lDoc.add(newField("id", "doc1", Field.Store.YES, Field.Index.NOT_ANALYZED));
+        lDoc.add(newField("handle", "1", Field.Store.YES, Field.Index.NOT_ANALYZED));
+
+        // creating a document to store
+        Document lDoc2 = new Document();
+        lDoc2.add(newField("fulltext", "in a galaxy far far away.....",
+            Field.Store.YES, Field.Index.ANALYZED));
+        lDoc2.add(newField("id", "doc2", Field.Store.YES, Field.Index.NOT_ANALYZED));
+        lDoc2.add(newField("handle", "1", Field.Store.YES, Field.Index.NOT_ANALYZED));
+
+        // creating a document to store
+        Document lDoc3 = new Document();
+        lDoc3.add(newField("fulltext", "a bizarre bug manifested itself....",
+            Field.Store.YES, Field.Index.ANALYZED));
+        lDoc3.add(newField("id", "doc3", Field.Store.YES, Field.Index.NOT_ANALYZED));
+        lDoc3.add(newField("handle", "1", Field.Store.YES, Field.Index.NOT_ANALYZED));
+
+        // creating an index writer for the first index
+        IndexWriter writerA = new IndexWriter(indexStoreA, newIndexWriterConfig(TEST_VERSION_CURRENT, new StandardAnalyzer(TEST_VERSION_CURRENT)));
+        // creating an index writer for the second index, but writing nothing
+        IndexWriter writerB = new IndexWriter(indexStoreB, newIndexWriterConfig(TEST_VERSION_CURRENT, new StandardAnalyzer(TEST_VERSION_CURRENT)));
+
+        //--------------------------------------------------------------------
+        // scenario 1
+        //--------------------------------------------------------------------
+
+        // writing the documents to the first index
+        writerA.addDocument(lDoc);
+        writerA.addDocument(lDoc2);
+        writerA.addDocument(lDoc3);
+        writerA.optimize();
+        writerA.close();
+
+        // closing the second index
+        writerB.close();
+
+        // creating the query
+        QueryParser parser = new QueryParser(TEST_VERSION_CURRENT, "fulltext", new StandardAnalyzer(TEST_VERSION_CURRENT));
+        Query query = parser.parse("handle:1");
+
+        // building the searchables
+        Searcher[] searchers = new Searcher[2];
+        // VITAL STEP: adding the searcher for the empty index first, before the searcher for the populated index
+        searchers[0] = new IndexSearcher(indexStoreB, true);
+        searchers[1] = new IndexSearcher(indexStoreA, true);
+        // creating the multiSearcher
+        Searcher mSearcher = getMultiSearcherInstance(searchers);
+        // performing the search
+        ScoreDoc[] hits = mSearcher.search(query, null, 1000).scoreDocs;
+
+        assertEquals(3, hits.length);
+
+        // iterating over the hit documents
+        for (int i = 0; i < hits.length; i++) {
+          mSearcher.doc(hits[i].doc);
+        }
+        mSearcher.close();
+
+
+        //--------------------------------------------------------------------
+        // scenario 2
+        //--------------------------------------------------------------------
+
+        // adding one document to the empty index
+        writerB = new IndexWriter(indexStoreB, newIndexWriterConfig(
+            TEST_VERSION_CURRENT, 
+                new StandardAnalyzer(TEST_VERSION_CURRENT))
+                .setOpenMode(OpenMode.APPEND));
+        writerB.addDocument(lDoc);
+        writerB.optimize();
+        writerB.close();
+
+        // building the searchables
+        Searcher[] searchers2 = new Searcher[2];
+        // VITAL STEP: adding the searcher for the empty index first, before the searcher for the populated index
+        searchers2[0] = new IndexSearcher(indexStoreB, true);
+        searchers2[1] = new IndexSearcher(indexStoreA, true);
+        // creating the multiSearcher
+        MultiSearcher mSearcher2 = getMultiSearcherInstance(searchers2);
+        // performing the same search
+        ScoreDoc[] hits2 = mSearcher2.search(query, null, 1000).scoreDocs;
+
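+        // indexStoreA still holds three matching documents and indexStoreB now holds
+        // one, so the aggregated search should return four hits.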
+        assertEquals(4, hits2.length);
+
+        // iterating over the hit documents
+        for (int i = 0; i < hits2.length; i++) {
+          // no exception should happen at this point
+          mSearcher2.doc(hits2[i].doc);
+        }
+
+        // test the subSearcher() method:
+        Query subSearcherQuery = parser.parse("id:doc1");
+        hits2 = mSearcher2.search(subSearcherQuery, null, 1000).scoreDocs;
+        assertEquals(2, hits2.length);
+        assertEquals(0, mSearcher2.subSearcher(hits2[0].doc));   // hit from searchers2[0]
+        assertEquals(1, mSearcher2.subSearcher(hits2[1].doc));   // hit from searchers2[1]
+        subSearcherQuery = parser.parse("id:doc2");
+        hits2 = mSearcher2.search(subSearcherQuery, null, 1000).scoreDocs;
+        assertEquals(1, hits2.length);
+        assertEquals(1, mSearcher2.subSearcher(hits2[0].doc));   // hit from searchers2[1]
+        mSearcher2.close();
+
+        //--------------------------------------------------------------------
+        // scenario 3
+        //--------------------------------------------------------------------
+
+        // deleting the document just added, this will cause a different exception to take place
+        Term term = new Term("id", "doc1");
+        IndexReader readerB = IndexReader.open(indexStoreB, false);
+        readerB.deleteDocuments(term);
+        readerB.close();
+
+        // optimizing the index with the writer
+        writerB = new IndexWriter(indexStoreB, new IndexWriterConfig(
+            TEST_VERSION_CURRENT, 
+                new StandardAnalyzer(TEST_VERSION_CURRENT))
+                .setOpenMode(OpenMode.APPEND));
+        writerB.optimize();
+        writerB.close();
+
+        // building the searchables
+        Searcher[] searchers3 = new Searcher[2];
+
+        searchers3[0] = new IndexSearcher(indexStoreB, true);
+        searchers3[1] = new IndexSearcher(indexStoreA, true);
+        // creating the multiSearcher
+        Searcher mSearcher3 = getMultiSearcherInstance(searchers3);
+        // performing the same search
+        ScoreDoc[] hits3 = mSearcher3.search(query, null, 1000).scoreDocs;
+
+        assertEquals(3, hits3.length);
+
+        // iterating over the hit documents
+        for (int i = 0; i < hits3.length; i++) {
+          mSearcher3.doc(hits3[i].doc);
+        }
+        mSearcher3.close();
+        indexStoreA.close();
+        indexStoreB.close();
+    }
+    
+    private Document createDocument(String contents1, String contents2) {
+        Document document=new Document();
+        
+        document.add(newField("contents", contents1, Field.Store.YES, Field.Index.NOT_ANALYZED));
+      document.add(newField("other", "other contents", Field.Store.YES, Field.Index.NOT_ANALYZED));
+        if (contents2!=null) {
+            document.add(newField("contents", contents2, Field.Store.YES, Field.Index.NOT_ANALYZED));
+        }
+        
+        return document;
+    }
+    
+    private void initIndex(Random random, Directory directory, int nDocs, boolean create, String contents2) throws IOException {
+        IndexWriter indexWriter=null;
+        
+        try {
+          indexWriter = new IndexWriter(directory, LuceneTestCase.newIndexWriterConfig(random,
+              TEST_VERSION_CURRENT, new KeywordAnalyzer()).setOpenMode(
+                  create ? OpenMode.CREATE : OpenMode.APPEND));
+            
+            for (int i=0; i<nDocs; i++) {
+                indexWriter.addDocument(createDocument("doc" + i, contents2));
+            }
+        } finally {
+            if (indexWriter!=null) {
+                indexWriter.close();
+            }
+        }
+    }
+
+  public void testFieldSelector() throws Exception {
+    Directory ramDirectory1, ramDirectory2;
+    IndexSearcher indexSearcher1, indexSearcher2;
+
+    ramDirectory1 = newDirectory();
+    ramDirectory2 = newDirectory();
+    Query query = new TermQuery(new Term("contents", "doc0"));
+
+    // Now put the documents in a different index
+    initIndex(random, ramDirectory1, 10, true, null); // documents with a single token "doc0", "doc1", etc...
+    initIndex(random, ramDirectory2, 10, true, "x"); // documents with two tokens "doc0" and "x", "doc1" and x, etc...
+
+    indexSearcher1 = new IndexSearcher(ramDirectory1, true);
+    indexSearcher2 = new IndexSearcher(ramDirectory2, true);
+
+    MultiSearcher searcher = getMultiSearcherInstance(new Searcher[]{indexSearcher1, indexSearcher2});
+    assertTrue("searcher is null and it shouldn't be", searcher != null);
+    ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
+    assertTrue("hits is null and it shouldn't be", hits != null);
+    assertTrue(hits.length + " does not equal: " + 2, hits.length == 2);
+    Document document = searcher.doc(hits[0].doc);
+    assertTrue("document is null and it shouldn't be", document != null);
+    assertTrue("document.getFields() Size: " + document.getFields().size() + " is not: " + 2, document.getFields().size() == 2);
+    //Should be one document from each directory
+    //they both have two fields, contents and other
+    Set<String> ftl = new HashSet<String>();
+    ftl.add("other");
+    SetBasedFieldSelector fs = new SetBasedFieldSelector(ftl, Collections. <String> emptySet());
+    document = searcher.doc(hits[0].doc, fs);
+    assertTrue("document is null and it shouldn't be", document != null);
+    assertTrue("document.getFields() Size: " + document.getFields().size() + " is not: " + 1, document.getFields().size() == 1);
+    String value = document.get("contents");
+    assertTrue("value is not null and it should be", value == null);
+    value = document.get("other");
+    assertTrue("value is null and it shouldn't be", value != null);
+    ftl.clear();
+    ftl.add("contents");
+    fs = new SetBasedFieldSelector(ftl, Collections. <String> emptySet());
+    document = searcher.doc(hits[1].doc, fs);
+    value = document.get("contents");
+    assertTrue("value is null and it shouldn't be", value != null);    
+    value = document.get("other");
+    assertTrue("value is not null and it should be", value == null);
+    indexSearcher1.close();
+    indexSearcher2.close();
+    ramDirectory1.close();
+    ramDirectory2.close();
+    searcher.close();
+  }
+
+  /* uncomment this when the highest score is always normalized to 1.0, even when it was < 1.0
+ public void testNormalization1() throws IOException {
+     testNormalization(1, "Using 1 document per index:");
+ }
+  */
+    
+    public void testNormalization10() throws IOException {
+        testNormalization(10, "Using 10 documents per index:");
+    }
+    
+    private void testNormalization(int nDocs, String message) throws IOException {
+        Query query=new TermQuery(new Term("contents", "doc0"));
+        
+        Directory ramDirectory1;
+        IndexSearcher indexSearcher1;
+        ScoreDoc[] hits;
+        
+        ramDirectory1=newDirectory();
+        
+        // First put the documents in the same index
+        initIndex(random, ramDirectory1, nDocs, true, null); // documents with a single token "doc0", "doc1", etc...
+        initIndex(random, ramDirectory1, nDocs, false, "x"); // documents with two tokens "doc0" and "x", "doc1" and x, etc...
+        
+        indexSearcher1=new IndexSearcher(ramDirectory1, true);
+        indexSearcher1.setDefaultFieldSortScoring(true, true);
+        
+        hits=indexSearcher1.search(query, null, 1000).scoreDocs;
+        
+        assertEquals(message, 2, hits.length);
+        
+        // Store the scores for use later
+        float[] scores={ hits[0].score, hits[1].score };
+        
+        assertTrue(message, scores[0] > scores[1]);
+        
+        indexSearcher1.close();
+        ramDirectory1.close();
+        hits=null;
+        
+        
+        
+        Directory ramDirectory2;
+        IndexSearcher indexSearcher2;
+        
+        ramDirectory1=newDirectory();
+        ramDirectory2=newDirectory();
+        
+        // Now put the documents in a different index
+        initIndex(random, ramDirectory1, nDocs, true, null); // documents with a single token "doc0", "doc1", etc...
+        initIndex(random, ramDirectory2, nDocs, true, "x"); // documents with two tokens "doc0" and "x", "doc1" and x, etc...
+        
+        indexSearcher1=new IndexSearcher(ramDirectory1, true);
+        indexSearcher1.setDefaultFieldSortScoring(true, true);
+        indexSearcher2=new IndexSearcher(ramDirectory2, true);
+        indexSearcher2.setDefaultFieldSortScoring(true, true);
+        
+        Searcher searcher=getMultiSearcherInstance(new Searcher[] { indexSearcher1, indexSearcher2 });
+        
+        hits=searcher.search(query, null, 1000).scoreDocs;
+        
+        assertEquals(message, 2, hits.length);
+        
+        // The scores should be the same (within reason)
+        assertEquals(message, scores[0], hits[0].score, 1e-6); // This will be a document from ramDirectory1
+        assertEquals(message, scores[1], hits[1].score, 1e-6); // This will be a document from ramDirectory2
+        
+        
+        
+        // Adding a Sort.RELEVANCE object should not change anything
+        hits=searcher.search(query, null, 1000, Sort.RELEVANCE).scoreDocs;
+        
+        assertEquals(message, 2, hits.length);
+        
+        assertEquals(message, scores[0], hits[0].score, 1e-6); // This will be a document from ramDirectory1
+        assertEquals(message, scores[1], hits[1].score, 1e-6); // This will be a document from ramDirectory2
+        
+        searcher.close();
+        
+        ramDirectory1.close();
+        ramDirectory2.close();
+    }
+    
+    /**
+     * test that custom similarity is in effect when using MultiSearcher (LUCENE-789).
+     * @throws IOException 
+     */
+    public void testCustomSimilarity () throws IOException {
+        Directory dir = newDirectory();
+        initIndex(random, dir, 10, true, "x"); // documents with two tokens "doc0" and "x", "doc1" and x, etc...
+        IndexSearcher srchr = new IndexSearcher(dir, true);
+        MultiSearcher msrchr = getMultiSearcherInstance(new Searcher[]{srchr});
+        
+        Similarity customSimilarity = new DefaultSimilarity() {
+            // override all
+            @Override
+            public float idf(int docFreq, int numDocs) { return 100.0f; }
+            @Override
+            public float coord(int overlap, int maxOverlap) { return 1.0f; }
+            @Override
+            public float computeNorm(String fieldName, FieldInvertState state) { return state.getBoost(); }
+            @Override
+            public float queryNorm(float sumOfSquaredWeights) { return 1.0f; }
+            @Override
+            public float sloppyFreq(int distance) { return 1.0f; }
+            @Override
+            public float tf(float freq) { return 1.0f; }
+        };
+        
+        srchr.setSimilarity(customSimilarity);
+        msrchr.setSimilarity(customSimilarity);
+  
+        Query query=new TermQuery(new Term("contents", "doc0"));
+  
+        // Get a score from IndexSearcher
+        TopDocs topDocs = srchr.search(query, null, 1);
+        float score1 = topDocs.getMaxScore();
+        
+        // Get the score from MultiSearcher
+        topDocs = msrchr.search(query, null, 1);
+        float scoreN = topDocs.getMaxScore();
+        
+        // The scores from the IndexSearcher and Multisearcher should be the same
+        // if the same similarity is used.
+        assertEquals("MultiSearcher score must be equal to single searcher score!", score1, scoreN, 1e-6);
+        msrchr.close();
+        srchr.close();
+        dir.close();
+    }
+    
+    public void testDocFreq() throws IOException{
+      Directory dir1 = newDirectory();
+      Directory dir2 = newDirectory();
+
+      initIndex(random, dir1, 10, true, "x"); // documents with two tokens "doc0" and "x", "doc1" and x, etc...
+      initIndex(random, dir2, 5, true, "x"); // documents with two tokens "doc0" and "x", "doc1" and x, etc...
+      IndexSearcher searcher1 = new IndexSearcher(dir1, true);
+      IndexSearcher searcher2 = new IndexSearcher(dir2, true);
+      
+      MultiSearcher multiSearcher = getMultiSearcherInstance(new Searcher[]{searcher1, searcher2});
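+      // dir1 contributes 10 documents containing "x" and dir2 contributes 5, so the
+      // aggregated document frequency for "x" should be 15.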
+      assertEquals(15, multiSearcher.docFreq(new Term("contents","x")));
+      multiSearcher.close();
+      searcher1.close();
+      searcher2.close();
+      dir1.close();
+      dir2.close();
+    }
+    
+    public void testCreateDocFrequencyMap() throws IOException{
+      Directory dir1 = newDirectory();
+      Directory dir2 = newDirectory();
+      Term template = new Term("contents") ;
+      String[] contents  = {"a", "b", "c"};
+      HashSet<Term> termsSet = new HashSet<Term>();
+      for (int i = 0; i < contents.length; i++) {
+        initIndex(random, dir1, i+10, i==0, contents[i]); 
+        initIndex(random, dir2, i+5, i==0, contents[i]);
+        termsSet.add(template.createTerm(contents[i]));
+      }
+      IndexSearcher searcher1 = new IndexSearcher(dir1, true);
+      IndexSearcher searcher2 = new IndexSearcher(dir2, true);
+      MultiSearcher multiSearcher = getMultiSearcherInstance(new Searcher[]{searcher1, searcher2});
+      Map<Term,Integer> docFrequencyMap = multiSearcher.createDocFrequencyMap(termsSet);
+      assertEquals(3, docFrequencyMap.size());
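+      // Each term contents[i] was written to i+10 documents in dir1 and i+5 in dir2,
+      // so its expected aggregated document frequency is (i+10) + (i+5) = 2*i + 15.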
+      for (int i = 0; i < contents.length; i++) {
+        assertEquals(Integer.valueOf((i*2) +15), docFrequencyMap.get(template.createTerm(contents[i])));
+      }
+      multiSearcher.close();
+      searcher1.close();
+      searcher2.close();
+      dir1.close();
+      dir2.close();
+    }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/TestMultiSearcherRanking.java b/lucene/backwards/src/test/org/apache/lucene/search/TestMultiSearcherRanking.java
new file mode 100644
index 0000000..51a33f4
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/TestMultiSearcherRanking.java
@@ -0,0 +1,173 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.queryParser.ParseException;
+import org.apache.lucene.queryParser.QueryParser;
+import org.apache.lucene.store.Directory;
+import java.io.IOException;
+
+/**
+ * Tests {@link MultiSearcher} ranking, i.e. makes sure this bug is fixed:
+ * http://issues.apache.org/bugzilla/show_bug.cgi?id=31841
+ *
+ */
+public class TestMultiSearcherRanking extends LuceneTestCase {
+  
+  private final String FIELD_NAME = "body";
+  private Searcher multiSearcher;
+  private Searcher singleSearcher;
+
+  public void testOneTermQuery() throws IOException, ParseException {
+    checkQuery("three");
+  }
+
+  public void testTwoTermQuery() throws IOException, ParseException {
+    checkQuery("three foo");
+  }
+
+  public void testPrefixQuery() throws IOException, ParseException {
+    checkQuery("multi*");
+  }
+
+  public void testFuzzyQuery() throws IOException, ParseException {
+    checkQuery("multiThree~");
+  }
+
+  public void testRangeQuery() throws IOException, ParseException {
+    checkQuery("{multiA TO multiP}");
+  }
+
+  public void testMultiPhraseQuery() throws IOException, ParseException {
+    checkQuery("\"blueberry pi*\"");
+  }
+
+  public void testNoMatchQuery() throws IOException, ParseException {
+    checkQuery("+three +nomatch");
+  }
+
+  /*
+  public void testTermRepeatedQuery() throws IOException, ParseException {
+    // TODO: this corner case yields different results.
+    checkQuery("multi* multi* foo");
+  }
+  */
+
+  /**
+   * checks if a query yields the same result when executed on
+   * a single IndexSearcher containing all documents and on a
+   * MultiSearcher aggregating sub-searchers
+   * @param queryStr  the query to check.
+   * @throws IOException
+   * @throws ParseException
+   */
+  private void checkQuery(String queryStr) throws IOException, ParseException {
+    // check result hit ranking
+    if(VERBOSE) System.out.println("Query: " + queryStr);
+    QueryParser queryParser = new QueryParser(TEST_VERSION_CURRENT, FIELD_NAME, new StandardAnalyzer(TEST_VERSION_CURRENT));
+    Query query = queryParser.parse(queryStr);
+    ScoreDoc[] multiSearcherHits = multiSearcher.search(query, null, 1000).scoreDocs;
+    ScoreDoc[] singleSearcherHits = singleSearcher.search(query, null, 1000).scoreDocs;
+    assertEquals(multiSearcherHits.length, singleSearcherHits.length);
+    for (int i = 0; i < multiSearcherHits.length; i++) {
+      Document docMulti = multiSearcher.doc(multiSearcherHits[i].doc);
+      Document docSingle = singleSearcher.doc(singleSearcherHits[i].doc);
+      if(VERBOSE) System.out.println("Multi:  " + docMulti.get(FIELD_NAME) + " score="
+          + multiSearcherHits[i].score);
+      if(VERBOSE) System.out.println("Single: " + docSingle.get(FIELD_NAME) + " score="
+          + singleSearcherHits[i].score);
+      assertEquals(multiSearcherHits[i].score, singleSearcherHits[i].score,
+          0.001f);
+      assertEquals(docMulti.get(FIELD_NAME), docSingle.get(FIELD_NAME));
+    }
+    if(VERBOSE) System.out.println();
+  }
+  
+  /**
+   * initializes multiSearcher and singleSearcher with the same document set
+   */
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    // create MultiSearcher from two separate searchers
+    d1 = newDirectory();
+    IndexWriter iw1 = new IndexWriter(d1, newIndexWriterConfig(TEST_VERSION_CURRENT, new StandardAnalyzer(TEST_VERSION_CURRENT)).setMergePolicy(newLogMergePolicy()));
+    addCollection1(iw1);
+    iw1.close();
+    d2 = newDirectory();
+    IndexWriter iw2 = new IndexWriter(d2, newIndexWriterConfig(TEST_VERSION_CURRENT, new StandardAnalyzer(TEST_VERSION_CURRENT)).setMergePolicy(newLogMergePolicy()));
+    addCollection2(iw2);
+    iw2.close();
+
+    Searchable[] s = new Searchable[2];
+    s[0] = new IndexSearcher(d1, true);
+    s[1] = new IndexSearcher(d2, true);
+    multiSearcher = new MultiSearcher(s);
+
+    // create IndexSearcher which contains all documents
+    d = newDirectory();
+    IndexWriter iw = new IndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT, new StandardAnalyzer(TEST_VERSION_CURRENT)).setMergePolicy(newLogMergePolicy()));
+    addCollection1(iw);
+    addCollection2(iw);
+    iw.close();
+    singleSearcher = new IndexSearcher(d, true);
+  }
+  
+  Directory d1, d2, d;
+  
+  @Override
+  public void tearDown() throws Exception {
+    multiSearcher.close();
+    singleSearcher.close();
+    d1.close();
+    d2.close();
+    d.close();
+    super.tearDown();
+  }
+  
+  private void addCollection1(IndexWriter iw) throws IOException {
+    add("one blah three", iw);
+    add("one foo three multiOne", iw);
+    add("one foobar three multiThree", iw);
+    add("blueberry pie", iw);
+    add("blueberry strudel", iw);
+    add("blueberry pizza", iw);
+  }
+
+  private void addCollection2(IndexWriter iw) throws IOException {
+    add("two blah three", iw);
+    add("two foo xxx multiTwo", iw);
+    add("two foobar xxx multiThreee", iw);
+    add("blueberry chewing gum", iw);
+    add("bluebird pizza", iw);
+    add("bluebird foobar pizza", iw);
+    add("piccadilly circus", iw);
+  }
+  
+  private void add(String value, IndexWriter iw) throws IOException {
+    Document d = new Document();
+    d.add(newField(FIELD_NAME, value, Field.Store.YES, Field.Index.ANALYZED));
+    iw.addDocument(d);
+  }
+  
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/TestMultiTermConstantScore.java b/lucene/backwards/src/test/org/apache/lucene/search/TestMultiTermConstantScore.java
new file mode 100644
index 0000000..bb27968
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/TestMultiTermConstantScore.java
@@ -0,0 +1,713 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.MockTokenizer;
+import org.apache.lucene.analysis.SimpleAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.store.Directory;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.text.Collator;
+import java.util.Locale;
+
+import junit.framework.Assert;
+
+public class TestMultiTermConstantScore extends BaseTestRangeFilter {
+
+  /** threshold for comparing floats */
+  public static final float SCORE_COMP_THRESH = 1e-6f;
+
+  static Directory small;
+  static IndexReader reader;
+
+  static public void assertEquals(String m, float e, float a) {
+    Assert.assertEquals(m, e, a, SCORE_COMP_THRESH);
+  }
+
+  static public void assertEquals(String m, int e, int a) {
+    Assert.assertEquals(m, e, a);
+  }
+
+  @BeforeClass
+  public static void beforeClass() throws Exception {
+    String[] data = new String[] { "A 1 2 3 4 5 6", "Z       4 5 6", null,
+        "B   2   4 5 6", "Y     3   5 6", null, "C     3     6",
+        "X       4 5 6" };
+
+    small = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random, small, 
+        newIndexWriterConfig(TEST_VERSION_CURRENT, 
+            new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).setMergePolicy(newLogMergePolicy()));
+
+    for (int i = 0; i < data.length; i++) {
+      Document doc = new Document();
+      doc.add(newField("id", String.valueOf(i), Field.Store.YES,
+          Field.Index.NOT_ANALYZED));// Field.Keyword("id",String.valueOf(i)));
+      doc
+          .add(newField("all", "all", Field.Store.YES,
+              Field.Index.NOT_ANALYZED));// Field.Keyword("all","all"));
+      if (null != data[i]) {
+        doc.add(newField("data", data[i], Field.Store.YES,
+            Field.Index.ANALYZED));// Field.Text("data",data[i]));
+      }
+      writer.addDocument(doc);
+    }
+
+    reader = writer.getReader();
+    writer.close();
+  }
+
+  @AfterClass
+  public static void afterClass() throws Exception {
+    reader.close();
+    small.close();
+    reader = null;
+    small = null;
+  }
+
+  /** macro for readability */
+  public static Query csrq(String f, String l, String h, boolean il, boolean ih) {
+    TermRangeQuery query = new TermRangeQuery(f, l, h, il, ih);
+    query.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
+    return query;
+  }
+
+  public static Query csrq(String f, String l, String h, boolean il, boolean ih, MultiTermQuery.RewriteMethod method) {
+    TermRangeQuery query = new TermRangeQuery(f, l, h, il, ih);
+    query.setRewriteMethod(method);
+    return query;
+  }
+
+  /** macro for readability */
+  public static Query csrq(String f, String l, String h, boolean il,
+      boolean ih, Collator c) {
+    TermRangeQuery query = new TermRangeQuery(f, l, h, il, ih, c);
+    query.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
+    return query;
+  }
+
+  /** macro for readability */
+  public static Query cspq(Term prefix) {
+    PrefixQuery query = new PrefixQuery(prefix);
+    query.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
+    return query;
+  }
+
+  /** macro for readability */
+  public static Query cswcq(Term wild) {
+    WildcardQuery query = new WildcardQuery(wild);
+    query.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
+    return query;
+  }
+
+  @Test
+  public void testBasics() throws IOException {
+    QueryUtils.check(csrq("data", "1", "6", T, T));
+    QueryUtils.check(csrq("data", "A", "Z", T, T));
+    QueryUtils.checkUnequal(csrq("data", "1", "6", T, T), csrq("data", "A",
+        "Z", T, T));
+
+    QueryUtils.check(cspq(new Term("data", "p*u?")));
+    QueryUtils.checkUnequal(cspq(new Term("data", "pre*")), cspq(new Term(
+        "data", "pres*")));
+
+    QueryUtils.check(cswcq(new Term("data", "p")));
+    QueryUtils.checkUnequal(cswcq(new Term("data", "pre*n?t")), cswcq(new Term(
+        "data", "pr*t?j")));
+  }
+
+  @Test
+  public void testBasicsRngCollating() throws IOException {
+    Collator c = Collator.getInstance(Locale.ENGLISH);
+    QueryUtils.check(csrq("data", "1", "6", T, T, c));
+    QueryUtils.check(csrq("data", "A", "Z", T, T, c));
+    QueryUtils.checkUnequal(csrq("data", "1", "6", T, T, c), csrq("data", "A",
+        "Z", T, T, c));
+  }
+
+  @Test
+  public void testEqualScores() throws IOException {
+    // NOTE: uses the index built in *this* setUp
+
+    IndexSearcher search = newSearcher(reader);
+
+    ScoreDoc[] result;
+
+    // some hits match more terms than others, score should be the same
+
+    result = search.search(csrq("data", "1", "6", T, T), null, 1000).scoreDocs;
+    int numHits = result.length;
+    assertEquals("wrong number of results", 6, numHits);
+    float score = result[0].score;
+    for (int i = 1; i < numHits; i++) {
+      assertEquals("score for " + i + " was not the same", score,
+          result[i].score);
+    }
+
+    result = search.search(csrq("data", "1", "6", T, T, MultiTermQuery.CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE), null, 1000).scoreDocs;
+    numHits = result.length;
+    assertEquals("wrong number of results", 6, numHits);
+    for (int i = 0; i < numHits; i++) {
+      assertEquals("score for " + i + " was not the same", score,
+          result[i].score);
+    }
+
+    search.close();
+  }
+
+  @Test
+  public void testBoost() throws IOException {
+    // NOTE: uses the index built in *this* setUp
+
+    IndexSearcher search = newSearcher(reader);
+
+    // test for correct application of query normalization
+    // must use a non score normalizing method for this.
+    Query q = csrq("data", "1", "6", T, T);
+    q.setBoost(100);
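+    // This is the only clause in the query, so queryNorm should work out to 1/boost and
+    // the boost cancels: every matching document is expected to score exactly 1.0, as the
+    // collector below asserts.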
+    search.search(q, null, new Collector() {
+      private int base = 0;
+      private Scorer scorer;
+      @Override
+      public void setScorer(Scorer scorer) throws IOException {
+        this.scorer = scorer;
+      }
+      @Override
+      public void collect(int doc) throws IOException {
+        assertEquals("score for doc " + (doc + base) + " was not correct", 1.0f, scorer.score());
+      }
+      @Override
+      public void setNextReader(IndexReader reader, int docBase) {
+        base = docBase;
+      }
+      @Override
+      public boolean acceptsDocsOutOfOrder() {
+        return true;
+      }
+    });
+
+    //
+    // Ensure that boosting works to score one clause of a query higher
+    // than another.
+    //
+    Query q1 = csrq("data", "A", "A", T, T); // matches document #0
+    q1.setBoost(.1f);
+    Query q2 = csrq("data", "Z", "Z", T, T); // matches document #1
+    BooleanQuery bq = new BooleanQuery(true);
+    bq.add(q1, BooleanClause.Occur.SHOULD);
+    bq.add(q2, BooleanClause.Occur.SHOULD);
+
+    ScoreDoc[] hits = search.search(bq, null, 1000).scoreDocs;
+    Assert.assertEquals(1, hits[0].doc);
+    Assert.assertEquals(0, hits[1].doc);
+    assertTrue(hits[0].score > hits[1].score);
+
+    q1 = csrq("data", "A", "A", T, T, MultiTermQuery.CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE); // matches document #0
+    q1.setBoost(.1f);
+    q2 = csrq("data", "Z", "Z", T, T, MultiTermQuery.CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE); // matches document #1
+    bq = new BooleanQuery(true);
+    bq.add(q1, BooleanClause.Occur.SHOULD);
+    bq.add(q2, BooleanClause.Occur.SHOULD);
+
+    hits = search.search(bq, null, 1000).scoreDocs;
+    Assert.assertEquals(1, hits[0].doc);
+    Assert.assertEquals(0, hits[1].doc);
+    assertTrue(hits[0].score > hits[1].score);
+
+    q1 = csrq("data", "A", "A", T, T); // matches document #0
+    q1.setBoost(10f);
+    q2 = csrq("data", "Z", "Z", T, T); // matches document #1
+    bq = new BooleanQuery(true);
+    bq.add(q1, BooleanClause.Occur.SHOULD);
+    bq.add(q2, BooleanClause.Occur.SHOULD);
+
+    hits = search.search(bq, null, 1000).scoreDocs;
+    Assert.assertEquals(0, hits[0].doc);
+    Assert.assertEquals(1, hits[1].doc);
+    assertTrue(hits[0].score > hits[1].score);
+    search.close();
+  }
+
+  @Test
+  public void testBooleanOrderUnAffected() throws IOException {
+    // NOTE: uses the index built in *this* setUp
+
+    IndexSearcher search = newSearcher(reader);
+
+    // first do a regular TermRangeQuery which uses term expansion so
+    // docs with more terms in range get higher scores
+
+    Query rq = new TermRangeQuery("data", "1", "4", T, T);
+
+    ScoreDoc[] expected = search.search(rq, null, 1000).scoreDocs;
+    int numHits = expected.length;
+
+    // now do a boolean query which also contains a
+    // ConstantScoreRangeQuery and make sure the order is the same
+
+    BooleanQuery q = new BooleanQuery();
+    q.add(rq, BooleanClause.Occur.MUST);// T, F);
+    q.add(csrq("data", "1", "6", T, T), BooleanClause.Occur.MUST);// T, F);
+
+    ScoreDoc[] actual = search.search(q, null, 1000).scoreDocs;
+
+    assertEquals("wrong numebr of hits", numHits, actual.length);
+    for (int i = 0; i < numHits; i++) {
+      assertEquals("mismatch in docid for hit#" + i, expected[i].doc,
+          actual[i].doc);
+    }
+
+    search.close();
+  }
+
+  @Test
+  public void testRangeQueryId() throws IOException {
+    // NOTE: uses the index built in *super* setUp
+
+    IndexReader reader = signedIndexReader;
+    IndexSearcher search = newSearcher(reader);
+
+    int medId = ((maxId - minId) / 2);
+
+    String minIP = pad(minId);
+    String maxIP = pad(maxId);
+    String medIP = pad(medId);
+
+    int numDocs = reader.numDocs();
+
+    assertEquals("num of docs", numDocs, 1 + maxId - minId);
+
+    ScoreDoc[] result;
+
+    // test id, bounded on both ends
+
+    result = search.search(csrq("id", minIP, maxIP, T, T), null, numDocs).scoreDocs;
+    assertEquals("find all", numDocs, result.length);
+
+    result = search.search(csrq("id", minIP, maxIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
+    assertEquals("find all", numDocs, result.length);
+
+    result = search.search(csrq("id", minIP, maxIP, T, F), null, numDocs).scoreDocs;
+    assertEquals("all but last", numDocs - 1, result.length);
+
+    result = search.search(csrq("id", minIP, maxIP, T, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
+    assertEquals("all but last", numDocs - 1, result.length);
+
+    result = search.search(csrq("id", minIP, maxIP, F, T), null, numDocs).scoreDocs;
+    assertEquals("all but first", numDocs - 1, result.length);
+
+    result = search.search(csrq("id", minIP, maxIP, F, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
+    assertEquals("all but first", numDocs - 1, result.length);
+
+    result = search.search(csrq("id", minIP, maxIP, F, F), null, numDocs).scoreDocs;
+    assertEquals("all but ends", numDocs - 2, result.length);
+
+    result = search.search(csrq("id", minIP, maxIP, F, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
+    assertEquals("all but ends", numDocs - 2, result.length);
+
+    result = search.search(csrq("id", medIP, maxIP, T, T), null, numDocs).scoreDocs;
+    assertEquals("med and up", 1 + maxId - medId, result.length);
+
+    result = search.search(csrq("id", medIP, maxIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
+    assertEquals("med and up", 1 + maxId - medId, result.length);
+
+    result = search.search(csrq("id", minIP, medIP, T, T), null, numDocs).scoreDocs;
+    assertEquals("up to med", 1 + medId - minId, result.length);
+
+    result = search.search(csrq("id", minIP, medIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
+    assertEquals("up to med", 1 + medId - minId, result.length);
+
+    // unbounded id
+
+    result = search.search(csrq("id", minIP, null, T, F), null, numDocs).scoreDocs;
+    assertEquals("min and up", numDocs, result.length);
+
+    result = search.search(csrq("id", null, maxIP, F, T), null, numDocs).scoreDocs;
+    assertEquals("max and down", numDocs, result.length);
+
+    result = search.search(csrq("id", minIP, null, F, F), null, numDocs).scoreDocs;
+    assertEquals("not min, but up", numDocs - 1, result.length);
+
+    result = search.search(csrq("id", null, maxIP, F, F), null, numDocs).scoreDocs;
+    assertEquals("not max, but down", numDocs - 1, result.length);
+
+    result = search.search(csrq("id", medIP, maxIP, T, F), null, numDocs).scoreDocs;
+    assertEquals("med and up, not max", maxId - medId, result.length);
+
+    result = search.search(csrq("id", minIP, medIP, F, T), null, numDocs).scoreDocs;
+    assertEquals("not min, up to med", medId - minId, result.length);
+
+    // very small sets
+
+    result = search.search(csrq("id", minIP, minIP, F, F), null, numDocs).scoreDocs;
+    assertEquals("min,min,F,F", 0, result.length);
+
+    result = search.search(csrq("id", minIP, minIP, F, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
+    assertEquals("min,min,F,F", 0, result.length);
+
+    result = search.search(csrq("id", medIP, medIP, F, F), null, numDocs).scoreDocs;
+    assertEquals("med,med,F,F", 0, result.length);
+
+    result = search.search(csrq("id", medIP, medIP, F, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
+    assertEquals("med,med,F,F", 0, result.length);
+
+    result = search.search(csrq("id", maxIP, maxIP, F, F), null, numDocs).scoreDocs;
+    assertEquals("max,max,F,F", 0, result.length);
+
+    result = search.search(csrq("id", maxIP, maxIP, F, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
+    assertEquals("max,max,F,F", 0, result.length);
+
+    result = search.search(csrq("id", minIP, minIP, T, T), null, numDocs).scoreDocs;
+    assertEquals("min,min,T,T", 1, result.length);
+
+    result = search.search(csrq("id", minIP, minIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
+    assertEquals("min,min,T,T", 1, result.length);
+
+    result = search.search(csrq("id", null, minIP, F, T), null, numDocs).scoreDocs;
+    assertEquals("nul,min,F,T", 1, result.length);
+
+    result = search.search(csrq("id", null, minIP, F, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
+    assertEquals("nul,min,F,T", 1, result.length);
+
+    result = search.search(csrq("id", maxIP, maxIP, T, T), null, numDocs).scoreDocs;
+    assertEquals("max,max,T,T", 1, result.length);
+
+    result = search.search(csrq("id", maxIP, maxIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
+    assertEquals("max,max,T,T", 1, result.length);
+
+    result = search.search(csrq("id", maxIP, null, T, F), null, numDocs).scoreDocs;
+    assertEquals("max,nul,T,T", 1, result.length);
+
+    result = search.search(csrq("id", maxIP, null, T, F, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
+    assertEquals("max,nul,T,T", 1, result.length);
+
+    result = search.search(csrq("id", medIP, medIP, T, T), null, numDocs).scoreDocs;
+    assertEquals("med,med,T,T", 1, result.length);
+
+    result = search.search(csrq("id", medIP, medIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
+    assertEquals("med,med,T,T", 1, result.length);
+    
+    search.close();
+  }
+
+  @Test
+  public void testRangeQueryIdCollating() throws IOException {
+    // NOTE: uses the index built in *super* setUp
+
+    IndexReader reader = signedIndexReader;
+    IndexSearcher search = newSearcher(reader);
+
+    int medId = ((maxId - minId) / 2);
+
+    String minIP = pad(minId);
+    String maxIP = pad(maxId);
+    String medIP = pad(medId);
+
+    int numDocs = reader.numDocs();
+
+    assertEquals("num of docs", numDocs, 1 + maxId - minId);
+
+    ScoreDoc[] result;
+
+    Collator c = Collator.getInstance(Locale.ENGLISH);
+
+    // test id, bounded on both ends
+
+    result = search.search(csrq("id", minIP, maxIP, T, T, c), null, numDocs).scoreDocs;
+    assertEquals("find all", numDocs, result.length);
+
+    result = search.search(csrq("id", minIP, maxIP, T, F, c), null, numDocs).scoreDocs;
+    assertEquals("all but last", numDocs - 1, result.length);
+
+    result = search.search(csrq("id", minIP, maxIP, F, T, c), null, numDocs).scoreDocs;
+    assertEquals("all but first", numDocs - 1, result.length);
+
+    result = search.search(csrq("id", minIP, maxIP, F, F, c), null, numDocs).scoreDocs;
+    assertEquals("all but ends", numDocs - 2, result.length);
+
+    result = search.search(csrq("id", medIP, maxIP, T, T, c), null, numDocs).scoreDocs;
+    assertEquals("med and up", 1 + maxId - medId, result.length);
+
+    result = search.search(csrq("id", minIP, medIP, T, T, c), null, numDocs).scoreDocs;
+    assertEquals("up to med", 1 + medId - minId, result.length);
+
+    // unbounded id
+
+    result = search.search(csrq("id", minIP, null, T, F, c), null, numDocs).scoreDocs;
+    assertEquals("min and up", numDocs, result.length);
+
+    result = search.search(csrq("id", null, maxIP, F, T, c), null, numDocs).scoreDocs;
+    assertEquals("max and down", numDocs, result.length);
+
+    result = search.search(csrq("id", minIP, null, F, F, c), null, numDocs).scoreDocs;
+    assertEquals("not min, but up", numDocs - 1, result.length);
+
+    result = search.search(csrq("id", null, maxIP, F, F, c), null, numDocs).scoreDocs;
+    assertEquals("not max, but down", numDocs - 1, result.length);
+
+    result = search.search(csrq("id", medIP, maxIP, T, F, c), null, numDocs).scoreDocs;
+    assertEquals("med and up, not max", maxId - medId, result.length);
+
+    result = search.search(csrq("id", minIP, medIP, F, T, c), null, numDocs).scoreDocs;
+    assertEquals("not min, up to med", medId - minId, result.length);
+
+    // very small sets
+
+    result = search.search(csrq("id", minIP, minIP, F, F, c), null, numDocs).scoreDocs;
+    assertEquals("min,min,F,F,c", 0, result.length);
+    result = search.search(csrq("id", medIP, medIP, F, F, c), null, numDocs).scoreDocs;
+    assertEquals("med,med,F,F,c", 0, result.length);
+    result = search.search(csrq("id", maxIP, maxIP, F, F, c), null, numDocs).scoreDocs;
+    assertEquals("max,max,F,F,c", 0, result.length);
+
+    result = search.search(csrq("id", minIP, minIP, T, T, c), null, numDocs).scoreDocs;
+    assertEquals("min,min,T,T,c", 1, result.length);
+    result = search.search(csrq("id", null, minIP, F, T, c), null, numDocs).scoreDocs;
+    assertEquals("nul,min,F,T,c", 1, result.length);
+
+    result = search.search(csrq("id", maxIP, maxIP, T, T, c), null, numDocs).scoreDocs;
+    assertEquals("max,max,T,T,c", 1, result.length);
+    result = search.search(csrq("id", maxIP, null, T, F, c), null, numDocs).scoreDocs;
+    assertEquals("max,nul,T,T,c", 1, result.length);
+
+    result = search.search(csrq("id", medIP, medIP, T, T, c), null, numDocs).scoreDocs;
+    assertEquals("med,med,T,T,c", 1, result.length);
+    
+    search.close();
+  }
+
+  @Test
+  public void testRangeQueryRand() throws IOException {
+    // NOTE: uses the index built in *super* setUp
+
+    IndexReader reader = signedIndexReader;
+    IndexSearcher search = newSearcher(reader);
+
+    String minRP = pad(signedIndexDir.minR);
+    String maxRP = pad(signedIndexDir.maxR);
+
+    int numDocs = reader.numDocs();
+
+    assertEquals("num of docs", numDocs, 1 + maxId - minId);
+
+    ScoreDoc[] result;
+
+    // test extremes, bounded on both ends
+
+    result = search.search(csrq("rand", minRP, maxRP, T, T), null, numDocs).scoreDocs;
+    assertEquals("find all", numDocs, result.length);
+
+    result = search.search(csrq("rand", minRP, maxRP, T, F), null, numDocs).scoreDocs;
+    assertEquals("all but biggest", numDocs - 1, result.length);
+
+    result = search.search(csrq("rand", minRP, maxRP, F, T), null, numDocs).scoreDocs;
+    assertEquals("all but smallest", numDocs - 1, result.length);
+
+    result = search.search(csrq("rand", minRP, maxRP, F, F), null, numDocs).scoreDocs;
+    assertEquals("all but extremes", numDocs - 2, result.length);
+
+    // unbounded
+
+    result = search.search(csrq("rand", minRP, null, T, F), null, numDocs).scoreDocs;
+    assertEquals("smallest and up", numDocs, result.length);
+
+    result = search.search(csrq("rand", null, maxRP, F, T), null, numDocs).scoreDocs;
+    assertEquals("biggest and down", numDocs, result.length);
+
+    result = search.search(csrq("rand", minRP, null, F, F), null, numDocs).scoreDocs;
+    assertEquals("not smallest, but up", numDocs - 1, result.length);
+
+    result = search.search(csrq("rand", null, maxRP, F, F), null, numDocs).scoreDocs;
+    assertEquals("not biggest, but down", numDocs - 1, result.length);
+
+    // very small sets
+
+    result = search.search(csrq("rand", minRP, minRP, F, F), null, numDocs).scoreDocs;
+    assertEquals("min,min,F,F", 0, result.length);
+    result = search.search(csrq("rand", maxRP, maxRP, F, F), null, numDocs).scoreDocs;
+    assertEquals("max,max,F,F", 0, result.length);
+
+    result = search.search(csrq("rand", minRP, minRP, T, T), null, numDocs).scoreDocs;
+    assertEquals("min,min,T,T", 1, result.length);
+    result = search.search(csrq("rand", null, minRP, F, T), null, numDocs).scoreDocs;
+    assertEquals("nul,min,F,T", 1, result.length);
+
+    result = search.search(csrq("rand", maxRP, maxRP, T, T), null, numDocs).scoreDocs;
+    assertEquals("max,max,T,T", 1, result.length);
+    result = search.search(csrq("rand", maxRP, null, T, F), null, numDocs).scoreDocs;
+    assertEquals("max,nul,T,T", 1, result.length);
+
+    search.close();
+  }
+
+  @Test
+  public void testRangeQueryRandCollating() throws IOException {
+    // NOTE: uses the index built in *super* setUp
+
+    // using the unsigned index because collation seems to ignore hyphens
+    IndexReader reader = unsignedIndexReader;
+    IndexSearcher search = newSearcher(reader);
+
+    String minRP = pad(unsignedIndexDir.minR);
+    String maxRP = pad(unsignedIndexDir.maxR);
+
+    int numDocs = reader.numDocs();
+
+    assertEquals("num of docs", numDocs, 1 + maxId - minId);
+
+    ScoreDoc[] result;
+
+    Collator c = Collator.getInstance(Locale.ENGLISH);
+
+    // test extremes, bounded on both ends
+
+    result = search.search(csrq("rand", minRP, maxRP, T, T, c), null, numDocs).scoreDocs;
+    assertEquals("find all", numDocs, result.length);
+
+    result = search.search(csrq("rand", minRP, maxRP, T, F, c), null, numDocs).scoreDocs;
+    assertEquals("all but biggest", numDocs - 1, result.length);
+
+    result = search.search(csrq("rand", minRP, maxRP, F, T, c), null, numDocs).scoreDocs;
+    assertEquals("all but smallest", numDocs - 1, result.length);
+
+    result = search.search(csrq("rand", minRP, maxRP, F, F, c), null, numDocs).scoreDocs;
+    assertEquals("all but extremes", numDocs - 2, result.length);
+
+    // unbounded
+
+    result = search.search(csrq("rand", minRP, null, T, F, c), null, numDocs).scoreDocs;
+    assertEquals("smallest and up", numDocs, result.length);
+
+    result = search.search(csrq("rand", null, maxRP, F, T, c), null, numDocs).scoreDocs;
+    assertEquals("biggest and down", numDocs, result.length);
+
+    result = search.search(csrq("rand", minRP, null, F, F, c), null, numDocs).scoreDocs;
+    assertEquals("not smallest, but up", numDocs - 1, result.length);
+
+    result = search.search(csrq("rand", null, maxRP, F, F, c), null, numDocs).scoreDocs;
+    assertEquals("not biggest, but down", numDocs - 1, result.length);
+
+    // very small sets
+
+    result = search.search(csrq("rand", minRP, minRP, F, F, c), null, numDocs).scoreDocs;
+    assertEquals("min,min,F,F,c", 0, result.length);
+    result = search.search(csrq("rand", maxRP, maxRP, F, F, c), null, numDocs).scoreDocs;
+    assertEquals("max,max,F,F,c", 0, result.length);
+
+    result = search.search(csrq("rand", minRP, minRP, T, T, c), null, numDocs).scoreDocs;
+    assertEquals("min,min,T,T,c", 1, result.length);
+    result = search.search(csrq("rand", null, minRP, F, T, c), null, numDocs).scoreDocs;
+    assertEquals("nul,min,F,T,c", 1, result.length);
+
+    result = search.search(csrq("rand", maxRP, maxRP, T, T, c), null, numDocs).scoreDocs;
+    assertEquals("max,max,T,T,c", 1, result.length);
+    result = search.search(csrq("rand", maxRP, null, T, F, c), null, numDocs).scoreDocs;
+    assertEquals("max,nul,T,T,c", 1, result.length);
+    
+    search.close();
+  }
+
+  @Test
+  public void testFarsi() throws Exception {
+
+    /* build an index */
+    Directory farsiIndex = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random, farsiIndex, new SimpleAnalyzer(TEST_VERSION_CURRENT));
+    Document doc = new Document();
+    doc.add(newField("content", "\u0633\u0627\u0628", Field.Store.YES,
+        Field.Index.NOT_ANALYZED));
+    doc.add(newField("body", "body", Field.Store.YES,
+        Field.Index.NOT_ANALYZED));
+    writer.addDocument(doc);
+
+    IndexReader reader = writer.getReader();
+    writer.close();
+
+    IndexSearcher search = newSearcher(reader);
+
+    // Neither Java 1.4.2 nor 1.5.0 has Farsi Locale collation available in
+    // RuleBasedCollator. However, the Arabic Locale seems to order the Farsi
+    // characters properly.
+    Collator c = Collator.getInstance(new Locale("ar"));
+
+    // Unicode order would include U+0633 in [ U+062F - U+0698 ], but Farsi
+    // orders the U+0698 character before the U+0633 character, so the single
+    // index Term below should NOT be returned by a ConstantScoreRangeQuery
+    // with a Farsi Collator (or an Arabic one for the case when Farsi is
+    // not supported).
+    ScoreDoc[] result = search.search(csrq("content", "\u062F", "\u0698", T, T,
+        c), null, 1000).scoreDocs;
+    assertEquals("The index Term should not be included.", 0, result.length);
+
+    result = search.search(csrq("content", "\u0633", "\u0638", T, T, c), null,
+        1000).scoreDocs;
+    assertEquals("The index Term should be included.", 1, result.length);
+    search.close();
+    reader.close();
+    farsiIndex.close();
+  }
+
+  @Test
+  public void testDanish() throws Exception {
+
+    /* build an index */
+    Directory danishIndex = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random, danishIndex, new SimpleAnalyzer(TEST_VERSION_CURRENT));
+
+    // Danish collation orders the words below in the given order
+    // (example taken from TestSort.testInternationalSort() ).
+    String[] words = { "H\u00D8T", "H\u00C5T", "MAND" };
+    for (int docnum = 0 ; docnum < words.length ; ++docnum) {   
+      Document doc = new Document();
+      doc.add(newField("content", words[docnum], 
+                        Field.Store.YES, Field.Index.NOT_ANALYZED));
+      doc.add(newField("body", "body",
+                        Field.Store.YES, Field.Index.NOT_ANALYZED));
+      writer.addDocument(doc);
+    }
+    IndexReader reader = writer.getReader();
+    writer.close();
+
+    IndexSearcher search = newSearcher(reader);
+
+    Collator c = Collator.getInstance(new Locale("da", "dk"));
+
+    // Unicode order would not include "H\u00C5T" in [ "H\u00D8T", "MAND" ],
+    // but Danish collation does.
+    ScoreDoc[] result = search.search
+      (csrq("content", "H\u00D8T", "MAND", F, F, c), null, 1000).scoreDocs;
+    assertEquals("The index Term should be included.", 1, result.length);
+
+    result = search.search
+      (csrq("content", "H\u00C5T", "MAND", F, F, c), null, 1000).scoreDocs;
+    assertEquals("The index Term should not be included.", 0, result.length);
+    search.close();
+    reader.close();
+    danishIndex.close();
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/TestMultiThreadTermVectors.java b/lucene/backwards/src/test/org/apache/lucene/search/TestMultiThreadTermVectors.java
new file mode 100644
index 0000000..adb9474
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/TestMultiThreadTermVectors.java
@@ -0,0 +1,191 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.*;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.TermFreqVector;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.English;
+
+import java.io.IOException;
+
+public class TestMultiThreadTermVectors extends LuceneTestCase {
+  private Directory directory;
+  public int numDocs = 100;
+  public int numThreads = 3;
+  
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    directory = newDirectory();
+    IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
+    //writer.setUseCompoundFile(false);
+    //writer.infoStream = System.out;
+    for (int i = 0; i < numDocs; i++) {
+      Document doc = new Document();
+      Fieldable fld = newField("field", English.intToEnglish(i), Field.Store.YES, Field.Index.NOT_ANALYZED, Field.TermVector.YES);
+      doc.add(fld);
+      writer.addDocument(doc);
+    }
+    writer.close();
+    
+  }
+  
+  @Override
+  public void tearDown() throws Exception {
+    directory.close();
+    super.tearDown();
+  }
+  
+  public void test() throws Exception {
+    
+    IndexReader reader = null;
+    
+    try {
+      reader = IndexReader.open(directory, true);
+      for(int i = 1; i <= numThreads; i++)
+        testTermPositionVectors(reader, i);
+      
+      
+    }
+    catch (IOException ioe) {
+      fail(ioe.getMessage());
+    }
+    finally {
+      if (reader != null) {
+        try {
+          // close the opened reader
+          reader.close();
+        } catch (IOException ioe) {
+          ioe.printStackTrace();
+        }
+      }
+    }
+  }
+  
+  public void testTermPositionVectors(final IndexReader reader, int threadCount) throws Exception {
+    MultiThreadTermVectorsReader[] mtr = new MultiThreadTermVectorsReader[threadCount];
+    for (int i = 0; i < threadCount; i++) {
+      mtr[i] = new MultiThreadTermVectorsReader();
+      mtr[i].init(reader);
+    }
+    
+    
+    // run until all reader threads have finished: poll every 10 ms and stop once a full pass finds no thread still alive
+    int threadsAlive = mtr.length;
+    while (threadsAlive > 0) {
+        //System.out.println("Threads alive");
+        Thread.sleep(10);
+        threadsAlive = mtr.length;
+        for (int i = 0; i < mtr.length; i++) {
+          if (mtr[i].isAlive() == true) {
+            break;
+          }
+          
+          threadsAlive--; 
+        }
+    }
+    
+    long totalTime = 0L;
+    for (int i = 0; i < mtr.length; i++) {
+      totalTime += mtr[i].timeElapsed;
+      mtr[i] = null;
+    }
+    
+    //System.out.println("threadcount: " + mtr.length + " average term vector time: " + totalTime/mtr.length);
+    
+  }
+  
+}
+
+class MultiThreadTermVectorsReader implements Runnable {
+  
+  private IndexReader reader = null;
+  private Thread t = null;
+  
+  private final int runsToDo = 100;
+  long timeElapsed = 0;
+  
+  
+  public void init(IndexReader reader) {
+    this.reader = reader;
+    timeElapsed = 0;
+    t=new Thread(this);
+    t.start();
+  }
+    
+  public boolean isAlive() {
+    if (t == null) return false;
+    
+    return t.isAlive();
+  }
+  
+  public void run() {
+    try {
+      // run the test 100 times
+      for (int i = 0; i < runsToDo; i++)
+        testTermVectors();
+    } catch (Exception e) {
+      e.printStackTrace();
+    }
+  }
+  
+  private void testTermVectors() throws Exception {
+    // check:
+    int numDocs = reader.numDocs();
+    long start = 0L;
+    for (int docId = 0; docId < numDocs; docId++) {
+      start = System.currentTimeMillis();
+      TermFreqVector [] vectors = reader.getTermFreqVectors(docId);
+      timeElapsed += System.currentTimeMillis()-start;
+      
+      // verify vectors result
+      verifyVectors(vectors, docId);
+      
+      start = System.currentTimeMillis();
+      TermFreqVector vector = reader.getTermFreqVector(docId, "field");
+      timeElapsed += System.currentTimeMillis()-start;
+      
+      vectors = new TermFreqVector[1];
+      vectors[0] = vector;
+      
+      verifyVectors(vectors, docId);
+      
+    }
+  }
+  
+  private void verifyVectors(TermFreqVector[] vectors, int num) {
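+    // the "field" value was indexed NOT_ANALYZED, so each term vector holds exactly one term:
+    // the full English spelling of the doc id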
+    StringBuilder temp = new StringBuilder();
+    String[] terms = null;
+    for (int i = 0; i < vectors.length; i++) {
+      terms = vectors[i].getTerms();
+      for (int z = 0; z < terms.length; z++) {
+        temp.append(terms[z]);
+      }
+    }
+    
+    if (!English.intToEnglish(num).trim().equals(temp.toString().trim()))
+        System.out.println("wrong term result");
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/TestMultiValuedNumericRangeQuery.java b/lucene/backwards/src/test/org/apache/lucene/search/TestMultiValuedNumericRangeQuery.java
new file mode 100644
index 0000000..9a22ce5
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/TestMultiValuedNumericRangeQuery.java
@@ -0,0 +1,81 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Locale;
+import java.text.DecimalFormat;
+import java.text.DecimalFormatSymbols;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.NumericField;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util._TestUtil;
+
+public class TestMultiValuedNumericRangeQuery extends LuceneTestCase {
+
+  /** Tests NumericRangeQuery on a multi-valued field (multiple numeric values per document).
+   * This test ensures that a classical TermRangeQuery returns exactly the same document numbers as
+   * NumericRangeQuery (see SOLR-1322 for discussion), and that the multiple precision terms per numeric
+   * value do not interfere with multiple numeric values per document.
+   */
+  public void testMultiValuedNRQ() throws Exception {
+    Directory directory = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random, directory,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
+        .setMaxBufferedDocs(_TestUtil.nextInt(random, 50, 1000)));
+    
+    DecimalFormat format = new DecimalFormat("00000000000", new DecimalFormatSymbols(Locale.US));
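+    // zero-padding to 11 digits makes lexicographic term order match numeric order for the non-negative
+    // random ints below (Integer.MAX_VALUE has 10 digits)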
+    
+    int num = atLeast(500);
+    for (int l = 0; l < num; l++) {
+      Document doc = new Document();
+      for (int m=0, c=random.nextInt(10); m<=c; m++) {
+        int value = random.nextInt(Integer.MAX_VALUE);
+        doc.add(newField("asc", format.format(value), Field.Store.NO, Field.Index.NOT_ANALYZED));
+        doc.add(new NumericField("trie", Field.Store.NO, true).setIntValue(value));
+      }
+      writer.addDocument(doc);
+    }
+    IndexReader reader = writer.getReader();
+    writer.close();
+    
+    IndexSearcher searcher=newSearcher(reader);
+    num = atLeast(50);
+    for (int i = 0; i < num; i++) {
+      int lower=random.nextInt(Integer.MAX_VALUE);
+      int upper=random.nextInt(Integer.MAX_VALUE);
+      if (lower>upper) {
+        int a=lower; lower=upper; upper=a;
+      }
+      TermRangeQuery cq=new TermRangeQuery("asc", format.format(lower), format.format(upper), true, true);
+      NumericRangeQuery<Integer> tq=NumericRangeQuery.newIntRange("trie", lower, upper, true, true);
+      TopDocs trTopDocs = searcher.search(cq, 1);
+      TopDocs nrTopDocs = searcher.search(tq, 1);
+      assertEquals("Returned count for NumericRangeQuery and TermRangeQuery must be equal", trTopDocs.totalHits, nrTopDocs.totalHits );
+    }
+    searcher.close();
+    reader.close();
+    directory.close();
+  }
+  
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/TestNot.java b/lucene/backwards/src/test/org/apache/lucene/search/TestNot.java
new file mode 100644
index 0000000..f077de3
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/TestNot.java
@@ -0,0 +1,59 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.util.LuceneTestCase;
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.queryParser.QueryParser;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.SimpleAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+
+/** Tests that a prohibited (NOT) clause excludes matching documents.
+ *
+ * @version $Revision$
+ */
+public class TestNot extends LuceneTestCase {
+
+  public void testNot() throws Exception {
+    Directory store = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random, store);
+
+    Document d1 = new Document();
+    d1.add(newField("field", "a b", Field.Store.YES, Field.Index.ANALYZED));
+
+    writer.addDocument(d1);
+    IndexReader reader = writer.getReader();
+
+    IndexSearcher searcher = newSearcher(reader);
+    QueryParser parser = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer(random));
+    Query query = parser.parse("a NOT b");
+    //System.out.println(query);
+    ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals(0, hits.length);
+    writer.close();
+    searcher.close();
+    reader.close();
+    store.close();
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java b/lucene/backwards/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java
new file mode 100644
index 0000000..e82679e
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java
@@ -0,0 +1,595 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.NumericField;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.NumericUtils;
+import org.apache.lucene.util._TestUtil;
+
+import org.junit.Test;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+
+public class TestNumericRangeQuery32 extends LuceneTestCase {
+  // distance of entries
+  private static final int distance = 6666;
+  // shift the start of the values to the left so that some values are negative:
+  private static final int startOffset = - 1 << 15;
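+  // (unary minus binds tighter than the shift, so this is (-1) << 15 == -32768)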
+  // number of docs to generate for testing
+  private static int noDocs;
+  
+  private static Directory directory = null;
+  private static IndexReader reader = null;
+  private static IndexSearcher searcher = null;
+  
+  @BeforeClass
+  public static void beforeClass() throws Exception {
+    noDocs = atLeast(4096);
+    directory = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random, directory,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
+        .setMaxBufferedDocs(_TestUtil.nextInt(random, 100, 1000))
+        .setMergePolicy(newLogMergePolicy()));
+    
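+    // NumericField(name, precisionStep, store, index); precisionStep=Integer.MAX_VALUE effectively disables the trie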
+    NumericField
+      field8 = new NumericField("field8", 8, Field.Store.YES, true),
+      field4 = new NumericField("field4", 4, Field.Store.YES, true),
+      field2 = new NumericField("field2", 2, Field.Store.YES, true),
+      fieldNoTrie = new NumericField("field"+Integer.MAX_VALUE, Integer.MAX_VALUE, Field.Store.YES, true),
+      ascfield8 = new NumericField("ascfield8", 8, Field.Store.NO, true),
+      ascfield4 = new NumericField("ascfield4", 4, Field.Store.NO, true),
+      ascfield2 = new NumericField("ascfield2", 2, Field.Store.NO, true);
+    
+    Document doc = new Document();
+    // add fields that have a distance, to test general functionality
+    doc.add(field8); doc.add(field4); doc.add(field2); doc.add(fieldNoTrie);
+    // add ascending fields with a distance of 1, beginning at -noDocs/2 to test the correct splitting of range and inclusive/exclusive
+    doc.add(ascfield8); doc.add(ascfield4); doc.add(ascfield2);
+    
+    // Add a series of noDocs docs with increasing int values
+    for (int l=0; l<noDocs; l++) {
+      int val=distance*l+startOffset;
+      field8.setIntValue(val);
+      field4.setIntValue(val);
+      field2.setIntValue(val);
+      fieldNoTrie.setIntValue(val);
+
+      val=l-(noDocs/2);
+      ascfield8.setIntValue(val);
+      ascfield4.setIntValue(val);
+      ascfield2.setIntValue(val);
+      writer.addDocument(doc);
+    }
+  
+    reader = writer.getReader();
+    searcher=newSearcher(reader);
+    writer.close();
+  }
+  
+  @AfterClass
+  public static void afterClass() throws Exception {
+    searcher.close();
+    searcher = null;
+    reader.close();
+    reader = null;
+    directory.close();
+    directory = null;
+  }
+  
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    // set the theoretical maximum term count for 8bit (see docs for the number)
+    // super.tearDown will restore the default
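+    // 32-bit ints with precisionStep=8 have 4 precision levels: up to 255 terms per range boundary on each of
+    // the 3 finer levels, plus up to 255 terms at the coarsest level = 3*255*2 + 255 = 1785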
+    BooleanQuery.setMaxClauseCount(3*255*2 + 255);
+  }
+  
+  /** test for constant score filter rewrite, constant score boolean rewrite, and plain filter; the other tests use only the constant score mode */
+  private void testRange(int precisionStep) throws Exception {
+    String field="field"+precisionStep;
+    int count=3000;
+    int lower=(distance*3/2)+startOffset, upper=lower + count*distance + (distance/3);
+    NumericRangeQuery<Integer> q = NumericRangeQuery.newIntRange(field, precisionStep, lower, upper, true, true);
+    NumericRangeFilter<Integer> f = NumericRangeFilter.newIntRange(field, precisionStep, lower, upper, true, true);
+    int lastTerms = 0;
+    for (byte i=0; i<3; i++) {
+      TopDocs topDocs;
+      int terms;
+      String type;
+      q.clearTotalNumberOfTerms();
+      f.clearTotalNumberOfTerms();
+      switch (i) {
+        case 0:
+          type = " (constant score filter rewrite)";
+          q.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
+          topDocs = searcher.search(q, null, noDocs, Sort.INDEXORDER);
+          terms = q.getTotalNumberOfTerms();
+          break;
+        case 1:
+          type = " (constant score boolean rewrite)";
+          q.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE);
+          topDocs = searcher.search(q, null, noDocs, Sort.INDEXORDER);
+          terms = q.getTotalNumberOfTerms();
+          break;
+        case 2:
+          type = " (filter)";
+          topDocs = searcher.search(new MatchAllDocsQuery(), f, noDocs, Sort.INDEXORDER);
+          terms = f.getTotalNumberOfTerms();
+          break;
+        default:
+          return;
+      }
+      if (VERBOSE) System.out.println("Found "+terms+" distinct terms in range for field '"+field+"'"+type+".");
+      ScoreDoc[] sd = topDocs.scoreDocs;
+      assertNotNull(sd);
+      assertEquals("Score doc count"+type, count, sd.length );
+      Document doc=searcher.doc(sd[0].doc);
+      assertEquals("First doc"+type, 2*distance+startOffset, Integer.parseInt(doc.get(field)) );
+      doc=searcher.doc(sd[sd.length-1].doc);
+      assertEquals("Last doc"+type, (1+count)*distance+startOffset, Integer.parseInt(doc.get(field)) );
+      if (i>0 && 
+          (searcher.getIndexReader().getSequentialSubReaders() == null || 
+           searcher.getIndexReader().getSequentialSubReaders().length == 1)) {
+        assertEquals("Distinct term number is equal for all query types", lastTerms, terms);
+      }
+      lastTerms = terms;
+    }
+  }
+
+  @Test
+  public void testRange_8bit() throws Exception {
+    testRange(8);
+  }
+  
+  @Test
+  public void testRange_4bit() throws Exception {
+    testRange(4);
+  }
+  
+  @Test
+  public void testRange_2bit() throws Exception {
+    testRange(2);
+  }
+  
+  @Test
+  public void testInverseRange() throws Exception {
+    NumericRangeFilter<Integer> f = NumericRangeFilter.newIntRange("field8", 8, 1000, -1000, true, true);
+    assertSame("A inverse range should return the EMPTY_DOCIDSET instance", DocIdSet.EMPTY_DOCIDSET, f.getDocIdSet(searcher.getIndexReader()));
+    f = NumericRangeFilter.newIntRange("field8", 8, Integer.MAX_VALUE, null, false, false);
+    assertSame("A exclusive range starting with Integer.MAX_VALUE should return the EMPTY_DOCIDSET instance",
+      DocIdSet.EMPTY_DOCIDSET, f.getDocIdSet(searcher.getIndexReader()));
+    f = NumericRangeFilter.newIntRange("field8", 8, null, Integer.MIN_VALUE, false, false);
+    assertSame("A exclusive range ending with Integer.MIN_VALUE should return the EMPTY_DOCIDSET instance",
+      DocIdSet.EMPTY_DOCIDSET, f.getDocIdSet(searcher.getIndexReader()));
+  }
+  
+  @Test
+  public void testOneMatchQuery() throws Exception {
+    NumericRangeQuery<Integer> q = NumericRangeQuery.newIntRange("ascfield8", 8, 1000, 1000, true, true);
+    assertSame(MultiTermQuery.CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE, q.getRewriteMethod());
+    TopDocs topDocs = searcher.search(q, noDocs);
+    ScoreDoc[] sd = topDocs.scoreDocs;
+    assertNotNull(sd);
+    assertEquals("Score doc count", 1, sd.length );
+  }
+  
+  private void testLeftOpenRange(int precisionStep) throws Exception {
+    String field="field"+precisionStep;
+    int count=3000;
+    int upper=(count-1)*distance + (distance/3) + startOffset;
+    NumericRangeQuery<Integer> q=NumericRangeQuery.newIntRange(field, precisionStep, null, upper, true, true);
+    TopDocs topDocs = searcher.search(q, null, noDocs, Sort.INDEXORDER);
+    if (VERBOSE) System.out.println("Found "+q.getTotalNumberOfTerms()+" distinct terms in left open range for field '"+field+"'.");
+    ScoreDoc[] sd = topDocs.scoreDocs;
+    assertNotNull(sd);
+    assertEquals("Score doc count", count, sd.length );
+    Document doc=searcher.doc(sd[0].doc);
+    assertEquals("First doc", startOffset, Integer.parseInt(doc.get(field)) );
+    doc=searcher.doc(sd[sd.length-1].doc);
+    assertEquals("Last doc", (count-1)*distance+startOffset, Integer.parseInt(doc.get(field)) );
+    
+    q=NumericRangeQuery.newIntRange(field, precisionStep, null, upper, false, true);
+    topDocs = searcher.search(q, null, noDocs, Sort.INDEXORDER);
+    sd = topDocs.scoreDocs;
+    assertNotNull(sd);
+    assertEquals("Score doc count", count, sd.length );
+    doc=searcher.doc(sd[0].doc);
+    assertEquals("First doc", startOffset, Integer.parseInt(doc.get(field)) );
+    doc=searcher.doc(sd[sd.length-1].doc);
+    assertEquals("Last doc", (count-1)*distance+startOffset, Integer.parseInt(doc.get(field)) );
+  }
+  
+  @Test
+  public void testLeftOpenRange_8bit() throws Exception {
+    testLeftOpenRange(8);
+  }
+  
+  @Test
+  public void testLeftOpenRange_4bit() throws Exception {
+    testLeftOpenRange(4);
+  }
+  
+  @Test
+  public void testLeftOpenRange_2bit() throws Exception {
+    testLeftOpenRange(2);
+  }
+  
+  private void testRightOpenRange(int precisionStep) throws Exception {
+    String field="field"+precisionStep;
+    int count=3000;
+    int lower=(count-1)*distance + (distance/3) +startOffset;
+    NumericRangeQuery<Integer> q=NumericRangeQuery.newIntRange(field, precisionStep, lower, null, true, true);
+    TopDocs topDocs = searcher.search(q, null, noDocs, Sort.INDEXORDER);
+    if (VERBOSE) System.out.println("Found "+q.getTotalNumberOfTerms()+" distinct terms in right open range for field '"+field+"'.");
+    ScoreDoc[] sd = topDocs.scoreDocs;
+    assertNotNull(sd);
+    assertEquals("Score doc count", noDocs-count, sd.length );
+    Document doc=searcher.doc(sd[0].doc);
+    assertEquals("First doc", count*distance+startOffset, Integer.parseInt(doc.get(field)) );
+    doc=searcher.doc(sd[sd.length-1].doc);
+    assertEquals("Last doc", (noDocs-1)*distance+startOffset, Integer.parseInt(doc.get(field)) );
+
+    q=NumericRangeQuery.newIntRange(field, precisionStep, lower, null, true, false);
+    topDocs = searcher.search(q, null, noDocs, Sort.INDEXORDER);
+    sd = topDocs.scoreDocs;
+    assertNotNull(sd);
+    assertEquals("Score doc count", noDocs-count, sd.length );
+    doc=searcher.doc(sd[0].doc);
+    assertEquals("First doc", count*distance+startOffset, Integer.parseInt(doc.get(field)) );
+    doc=searcher.doc(sd[sd.length-1].doc);
+    assertEquals("Last doc", (noDocs-1)*distance+startOffset, Integer.parseInt(doc.get(field)) );
+  }
+  
+  @Test
+  public void testRightOpenRange_8bit() throws Exception {
+    testRightOpenRange(8);
+  }
+  
+  @Test
+  public void testRightOpenRange_4bit() throws Exception {
+    testRightOpenRange(4);
+  }
+  
+  @Test
+  public void testRightOpenRange_2bit() throws Exception {
+    testRightOpenRange(2);
+  }
+  
+  @Test
+  public void testInfiniteValues() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    Document doc = new Document();
+    doc.add(new NumericField("float").setFloatValue(Float.NEGATIVE_INFINITY));
+    doc.add(new NumericField("int").setIntValue(Integer.MIN_VALUE));
+    writer.addDocument(doc);
+    
+    doc = new Document();
+    doc.add(new NumericField("float").setFloatValue(Float.POSITIVE_INFINITY));
+    doc.add(new NumericField("int").setIntValue(Integer.MAX_VALUE));
+    writer.addDocument(doc);
+    
+    doc = new Document();
+    doc.add(new NumericField("float").setFloatValue(0.0f));
+    doc.add(new NumericField("int").setIntValue(0));
+    writer.addDocument(doc);
+    writer.close();
+    
+    IndexSearcher s = new IndexSearcher(dir);
+    
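+    // a null bound means "open ended": even with inclusive=false an open-ended range matches the extreme values,
+    // so only explicit MIN_VALUE/MAX_VALUE bounds combined with exclusive flags drop the two extreme documents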
+    Query q=NumericRangeQuery.newIntRange("int", null, null, true, true);
+    TopDocs topDocs = s.search(q, 10);
+    assertEquals("Score doc count", 3,  topDocs.scoreDocs.length );
+    
+    q=NumericRangeQuery.newIntRange("int", null, null, false, false);
+    topDocs = s.search(q, 10);
+    assertEquals("Score doc count", 3,  topDocs.scoreDocs.length );
+
+    q=NumericRangeQuery.newIntRange("int", Integer.MIN_VALUE, Integer.MAX_VALUE, true, true);
+    topDocs = s.search(q, 10);
+    assertEquals("Score doc count", 3,  topDocs.scoreDocs.length );
+    
+    q=NumericRangeQuery.newIntRange("int", Integer.MIN_VALUE, Integer.MAX_VALUE, false, false);
+    topDocs = s.search(q, 10);
+    assertEquals("Score doc count", 1,  topDocs.scoreDocs.length );
+
+    q=NumericRangeQuery.newFloatRange("float", null, null, true, true);
+    topDocs = s.search(q, 10);
+    assertEquals("Score doc count", 3,  topDocs.scoreDocs.length );
+
+    q=NumericRangeQuery.newFloatRange("float", null, null, false, false);
+    topDocs = s.search(q, 10);
+    assertEquals("Score doc count", 3,  topDocs.scoreDocs.length );
+
+    s.close();
+    dir.close();
+  }
+  
+  private void testRandomTrieAndClassicRangeQuery(int precisionStep) throws Exception {
+    String field="field"+precisionStep;
+    int termCountT=0,termCountC=0;
+    int num = _TestUtil.nextInt(random, 10, 20);
+    for (int i = 0; i < num; i++) {
+      int lower=(int)(random.nextDouble()*noDocs*distance)+startOffset;
+      int upper=(int)(random.nextDouble()*noDocs*distance)+startOffset;
+      if (lower>upper) {
+        int a=lower; lower=upper; upper=a;
+      }
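+      // intToPrefixCoded(value) yields the full-precision (shift 0) term; the lower-precision trie terms carry a
+      // different shift prefix and sort outside this term range, so both query types must match the same documents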
+      // test inclusive range
+      NumericRangeQuery<Integer> tq=NumericRangeQuery.newIntRange(field, precisionStep, lower, upper, true, true);
+      TermRangeQuery cq=new TermRangeQuery(field, NumericUtils.intToPrefixCoded(lower), NumericUtils.intToPrefixCoded(upper), true, true);
+      TopDocs tTopDocs = searcher.search(tq, 1);
+      TopDocs cTopDocs = searcher.search(cq, 1);
+      assertEquals("Returned count for NumericRangeQuery and TermRangeQuery must be equal", cTopDocs.totalHits, tTopDocs.totalHits );
+      termCountT += tq.getTotalNumberOfTerms();
+      termCountC += cq.getTotalNumberOfTerms();
+      // test exclusive range
+      tq=NumericRangeQuery.newIntRange(field, precisionStep, lower, upper, false, false);
+      cq=new TermRangeQuery(field, NumericUtils.intToPrefixCoded(lower), NumericUtils.intToPrefixCoded(upper), false, false);
+      tTopDocs = searcher.search(tq, 1);
+      cTopDocs = searcher.search(cq, 1);
+      assertEquals("Returned count for NumericRangeQuery and TermRangeQuery must be equal", cTopDocs.totalHits, tTopDocs.totalHits );
+      termCountT += tq.getTotalNumberOfTerms();
+      termCountC += cq.getTotalNumberOfTerms();
+      // test left exclusive range
+      tq=NumericRangeQuery.newIntRange(field, precisionStep, lower, upper, false, true);
+      cq=new TermRangeQuery(field, NumericUtils.intToPrefixCoded(lower), NumericUtils.intToPrefixCoded(upper), false, true);
+      tTopDocs = searcher.search(tq, 1);
+      cTopDocs = searcher.search(cq, 1);
+      assertEquals("Returned count for NumericRangeQuery and TermRangeQuery must be equal", cTopDocs.totalHits, tTopDocs.totalHits );
+      termCountT += tq.getTotalNumberOfTerms();
+      termCountC += cq.getTotalNumberOfTerms();
+      // test right exclusive range
+      tq=NumericRangeQuery.newIntRange(field, precisionStep, lower, upper, true, false);
+      cq=new TermRangeQuery(field, NumericUtils.intToPrefixCoded(lower), NumericUtils.intToPrefixCoded(upper), true, false);
+      tTopDocs = searcher.search(tq, 1);
+      cTopDocs = searcher.search(cq, 1);
+      assertEquals("Returned count for NumericRangeQuery and TermRangeQuery must be equal", cTopDocs.totalHits, tTopDocs.totalHits );
+      termCountT += tq.getTotalNumberOfTerms();
+      termCountC += cq.getTotalNumberOfTerms();
+    }
+    if (precisionStep == Integer.MAX_VALUE && 
+        (searcher.getIndexReader().getSequentialSubReaders() == null || 
+         searcher.getIndexReader().getSequentialSubReaders().length == 1)) {
+      assertEquals("Total number of terms should be equal for unlimited precStep", termCountT, termCountC);
+    } else if (VERBOSE) {
+      System.out.println("Average number of terms during random search on '" + field + "':");
+      System.out.println(" Trie query: " + (((double)termCountT)/(num * 4)));
+      System.out.println(" Classical query: " + (((double)termCountC)/(num * 4)));
+    }
+  }
+  
+  @Test
+  public void testRandomTrieAndClassicRangeQuery_8bit() throws Exception {
+    testRandomTrieAndClassicRangeQuery(8);
+  }
+  
+  @Test
+  public void testRandomTrieAndClassicRangeQuery_4bit() throws Exception {
+    testRandomTrieAndClassicRangeQuery(4);
+  }
+  
+  @Test
+  public void testRandomTrieAndClassicRangeQuery_2bit() throws Exception {
+    testRandomTrieAndClassicRangeQuery(2);
+  }
+  
+  @Test
+  public void testRandomTrieAndClassicRangeQuery_NoTrie() throws Exception {
+    testRandomTrieAndClassicRangeQuery(Integer.MAX_VALUE);
+  }
+  
+  private void testRangeSplit(int precisionStep) throws Exception {
+    String field="ascfield"+precisionStep;
+    // between 10 and 20 random tests
+    int num = _TestUtil.nextInt(random, 10, 20);
+    for (int i = 0; i < num; i++) {
+      int lower=(int)(random.nextDouble()*noDocs - noDocs/2);
+      int upper=(int)(random.nextDouble()*noDocs - noDocs/2);
+      if (lower>upper) {
+        int a=lower; lower=upper; upper=a;
+      }
+      // test inclusive range
+      Query tq=NumericRangeQuery.newIntRange(field, precisionStep, lower, upper, true, true);
+      TopDocs tTopDocs = searcher.search(tq, 1);
+      assertEquals("Returned count of range query must be equal to inclusive range length", upper-lower+1, tTopDocs.totalHits );
+      // test exclusive range
+      tq=NumericRangeQuery.newIntRange(field, precisionStep, lower, upper, false, false);
+      tTopDocs = searcher.search(tq, 1);
+      assertEquals("Returned count of range query must be equal to exclusive range length", Math.max(upper-lower-1, 0), tTopDocs.totalHits );
+      // test left exclusive range
+      tq=NumericRangeQuery.newIntRange(field, precisionStep, lower, upper, false, true);
+      tTopDocs = searcher.search(tq, 1);
+      assertEquals("Returned count of range query must be equal to half exclusive range length", upper-lower, tTopDocs.totalHits );
+      // test right exclusive range
+      tq=NumericRangeQuery.newIntRange(field, precisionStep, lower, upper, true, false);
+      tTopDocs = searcher.search(tq, 1);
+      assertEquals("Returned count of range query must be equal to half exclusive range length", upper-lower, tTopDocs.totalHits );
+    }
+  }
+
+  @Test
+  public void testRangeSplit_8bit() throws Exception {
+    testRangeSplit(8);
+  }
+  
+  @Test
+  public void testRangeSplit_4bit() throws Exception {
+    testRangeSplit(4);
+  }
+  
+  @Test
+  public void testRangeSplit_2bit() throws Exception {
+    testRangeSplit(2);
+  }
+  
+  /** we fake a float test using int2float conversion of NumericUtils */
+  private void testFloatRange(int precisionStep) throws Exception {
+    final String field="ascfield"+precisionStep;
+    final int lower=-1000, upper=+2000;
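+    // sortableIntToFloat is the inverse of the floatToSortableInt conversion applied by the float query,
+    // so the range below selects exactly the ascfield values in [lower, upper]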
+    
+    Query tq=NumericRangeQuery.newFloatRange(field, precisionStep,
+      NumericUtils.sortableIntToFloat(lower), NumericUtils.sortableIntToFloat(upper), true, true);
+    TopDocs tTopDocs = searcher.search(tq, 1);
+    assertEquals("Returned count of range query must be equal to inclusive range length", upper-lower+1, tTopDocs.totalHits );
+    
+    Filter tf=NumericRangeFilter.newFloatRange(field, precisionStep,
+      NumericUtils.sortableIntToFloat(lower), NumericUtils.sortableIntToFloat(upper), true, true);
+    tTopDocs = searcher.search(new MatchAllDocsQuery(), tf, 1);
+    assertEquals("Returned count of range filter must be equal to inclusive range length", upper-lower+1, tTopDocs.totalHits );
+  }
+
+  @Test
+  public void testFloatRange_8bit() throws Exception {
+    testFloatRange(8);
+  }
+  
+  @Test
+  public void testFloatRange_4bit() throws Exception {
+    testFloatRange(4);
+  }
+  
+  @Test
+  public void testFloatRange_2bit() throws Exception {
+    testFloatRange(2);
+  }
+  
+  private void testSorting(int precisionStep) throws Exception {
+    String field="field"+precisionStep;
+    // between 10 and 20 random tests; the index order is ascending,
+    // so using a reverse sort field should return documents in descending order
+    int num = _TestUtil.nextInt(random, 10, 20);
+    for (int i = 0; i < num; i++) {
+      int lower=(int)(random.nextDouble()*noDocs*distance)+startOffset;
+      int upper=(int)(random.nextDouble()*noDocs*distance)+startOffset;
+      if (lower>upper) {
+        int a=lower; lower=upper; upper=a;
+      }
+      Query tq=NumericRangeQuery.newIntRange(field, precisionStep, lower, upper, true, true);
+      TopDocs topDocs = searcher.search(tq, null, noDocs, new Sort(new SortField(field, SortField.INT, true)));
+      if (topDocs.totalHits==0) continue;
+      ScoreDoc[] sd = topDocs.scoreDocs;
+      assertNotNull(sd);
+      int last=Integer.parseInt(searcher.doc(sd[0].doc).get(field));
+      for (int j=1; j<sd.length; j++) {
+        int act=Integer.parseInt(searcher.doc(sd[j].doc).get(field));
+        assertTrue("Docs should be sorted backwards", last>act );
+        last=act;
+      }
+    }
+  }
+
+  @Test
+  public void testSorting_8bit() throws Exception {
+    testSorting(8);
+  }
+  
+  @Test
+  public void testSorting_4bit() throws Exception {
+    testSorting(4);
+  }
+  
+  @Test
+  public void testSorting_2bit() throws Exception {
+    testSorting(2);
+  }
+  
+  @Test
+  public void testEqualsAndHash() throws Exception {
+    QueryUtils.checkHashEquals(NumericRangeQuery.newIntRange("test1", 4, 10, 20, true, true));
+    QueryUtils.checkHashEquals(NumericRangeQuery.newIntRange("test2", 4, 10, 20, false, true));
+    QueryUtils.checkHashEquals(NumericRangeQuery.newIntRange("test3", 4, 10, 20, true, false));
+    QueryUtils.checkHashEquals(NumericRangeQuery.newIntRange("test4", 4, 10, 20, false, false));
+    QueryUtils.checkHashEquals(NumericRangeQuery.newIntRange("test5", 4, 10, null, true, true));
+    QueryUtils.checkHashEquals(NumericRangeQuery.newIntRange("test6", 4, null, 20, true, true));
+    QueryUtils.checkHashEquals(NumericRangeQuery.newIntRange("test7", 4, null, null, true, true));
+    QueryUtils.checkEqual(
+      NumericRangeQuery.newIntRange("test8", 4, 10, 20, true, true), 
+      NumericRangeQuery.newIntRange("test8", 4, 10, 20, true, true)
+    );
+    QueryUtils.checkUnequal(
+      NumericRangeQuery.newIntRange("test9", 4, 10, 20, true, true), 
+      NumericRangeQuery.newIntRange("test9", 8, 10, 20, true, true)
+    );
+    QueryUtils.checkUnequal(
+      NumericRangeQuery.newIntRange("test10a", 4, 10, 20, true, true), 
+      NumericRangeQuery.newIntRange("test10b", 4, 10, 20, true, true)
+    );
+    QueryUtils.checkUnequal(
+      NumericRangeQuery.newIntRange("test11", 4, 10, 20, true, true), 
+      NumericRangeQuery.newIntRange("test11", 4, 20, 10, true, true)
+    );
+    QueryUtils.checkUnequal(
+      NumericRangeQuery.newIntRange("test12", 4, 10, 20, true, true), 
+      NumericRangeQuery.newIntRange("test12", 4, 10, 20, false, true)
+    );
+    QueryUtils.checkUnequal(
+      NumericRangeQuery.newIntRange("test13", 4, 10, 20, true, true), 
+      NumericRangeQuery.newFloatRange("test13", 4, 10f, 20f, true, true)
+    );
+    // the following produces a hash collision, because Long and Integer have the same hashcode, so only test equality:
+    Query q1 = NumericRangeQuery.newIntRange("test14", 4, 10, 20, true, true);
+    Query q2 = NumericRangeQuery.newLongRange("test14", 4, 10L, 20L, true, true);
+    assertFalse(q1.equals(q2));
+    assertFalse(q2.equals(q1));
+  }
+  
+  private void testEnum(int lower, int upper) throws Exception {
+    NumericRangeQuery<Integer> q = NumericRangeQuery.newIntRange("field4", 4, lower, upper, true, true);
+    FilteredTermEnum termEnum = q.getEnum(searcher.getIndexReader());
+    try {
+      int count = 0;
+      do {
+        final Term t = termEnum.term();
+        if (t != null) {
+          final int val = NumericUtils.prefixCodedToInt(t.text());
+          assertTrue("value not in bounds", val >= lower && val <= upper);
+          count++;
+        } else break;
+      } while (termEnum.next());
+      assertFalse(termEnum.next());
+      if (VERBOSE) System.out.println("TermEnum on 'field4' for range [" + lower + "," + upper + "] contained " + count + " terms.");
+    } finally {
+      termEnum.close();
+    }
+  }
+  
+  @Test
+  public void testEnum() throws Exception {
+    int count=3000;
+    int lower=(distance*3/2)+startOffset, upper=lower + count*distance + (distance/3);
+    // test enum with values
+    testEnum(lower, upper);
+    // test empty enum
+    testEnum(upper, lower);
+    // test empty enum outside of bounds
+    lower = distance*noDocs+startOffset;
+    upper = 2 * lower;
+    testEnum(lower, upper);
+  }
+  
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java b/lucene/backwards/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java
new file mode 100644
index 0000000..b0c0e71
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java
@@ -0,0 +1,596 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.NumericField;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.NumericUtils;
+import org.apache.lucene.util._TestUtil;
+
+import org.junit.Test;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+
+public class TestNumericRangeQuery64 extends LuceneTestCase {
+  // distance of entries
+  private static final long distance = 66666L;
+  // shift the start of the values to the left so that some values are negative:
+  private static final long startOffset = - 1L << 31;
+  // number of docs to generate for testing
+  private static int noDocs;
+  
+  private static Directory directory = null;
+  private static IndexReader reader = null;
+  private static IndexSearcher searcher = null;
+  
+  @BeforeClass
+  public static void beforeClass() throws Exception {
+    noDocs = atLeast(4096);
+    directory = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random, directory,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
+        .setMaxBufferedDocs(_TestUtil.nextInt(random, 100, 1000))
+        .setMergePolicy(newLogMergePolicy()));
+    
+    NumericField
+      field8 = new NumericField("field8", 8, Field.Store.YES, true),
+      field6 = new NumericField("field6", 6, Field.Store.YES, true),
+      field4 = new NumericField("field4", 4, Field.Store.YES, true),
+      field2 = new NumericField("field2", 2, Field.Store.YES, true),
+      fieldNoTrie = new NumericField("field"+Integer.MAX_VALUE, Integer.MAX_VALUE, Field.Store.YES, true),
+      ascfield8 = new NumericField("ascfield8", 8, Field.Store.NO, true),
+      ascfield6 = new NumericField("ascfield6", 6, Field.Store.NO, true),
+      ascfield4 = new NumericField("ascfield4", 4, Field.Store.NO, true),
+      ascfield2 = new NumericField("ascfield2", 2, Field.Store.NO, true);
+    
+    Document doc = new Document();
+    // add fields that have a distance, to test general functionality
+    doc.add(field8); doc.add(field6); doc.add(field4); doc.add(field2); doc.add(fieldNoTrie);
+    // add ascending fields with a distance of 1, beginning at -noDocs/2 to test the correct splitting of range and inclusive/exclusive
+    doc.add(ascfield8); doc.add(ascfield6); doc.add(ascfield4); doc.add(ascfield2);
+    
+    // Add a series of noDocs docs with increasing long values, by updating the fields
+    for (int l=0; l<noDocs; l++) {
+      long val=distance*l+startOffset;
+      field8.setLongValue(val);
+      field6.setLongValue(val);
+      field4.setLongValue(val);
+      field2.setLongValue(val);
+      fieldNoTrie.setLongValue(val);
+
+      val=l-(noDocs/2);
+      ascfield8.setLongValue(val);
+      ascfield6.setLongValue(val);
+      ascfield4.setLongValue(val);
+      ascfield2.setLongValue(val);
+      writer.addDocument(doc);
+    }
+  
+    reader = writer.getReader();
+    searcher=newSearcher(reader);
+    writer.close();
+  }
+  
+  @AfterClass
+  public static void afterClass() throws Exception {
+    searcher.close();
+    searcher = null;
+    reader.close();
+    reader = null;
+    directory.close();
+    directory = null;
+  }
+  
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    // set the theoretical maximum term count for 8bit (see docs for the number)
+    // super.tearDown will restore the default
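+    // 64-bit longs with precisionStep=8 have 8 precision levels: up to 255 terms per range boundary on each of
+    // the 7 finer levels, plus up to 255 terms at the coarsest level = 7*255*2 + 255 = 3825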
+    BooleanQuery.setMaxClauseCount(7*255*2 + 255);
+  }
+  
+  /** test for constant score filter rewrite, constant score boolean rewrite, and plain filter; the other tests use only the constant score mode */
+  private void testRange(int precisionStep) throws Exception {
+    String field="field"+precisionStep;
+    int count=3000;
+    long lower=(distance*3/2)+startOffset, upper=lower + count*distance + (distance/3);
+    NumericRangeQuery<Long> q = NumericRangeQuery.newLongRange(field, precisionStep, lower, upper, true, true);
+    NumericRangeFilter<Long> f = NumericRangeFilter.newLongRange(field, precisionStep, lower, upper, true, true);
+    int lastTerms = 0;
+    for (byte i=0; i<3; i++) {
+      TopDocs topDocs;
+      int terms;
+      String type;
+      q.clearTotalNumberOfTerms();
+      f.clearTotalNumberOfTerms();
+      switch (i) {
+        case 0:
+          type = " (constant score filter rewrite)";
+          q.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
+          topDocs = searcher.search(q, null, noDocs, Sort.INDEXORDER);
+          terms = q.getTotalNumberOfTerms();
+          break;
+        case 1:
+          type = " (constant score boolean rewrite)";
+          q.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE);
+          topDocs = searcher.search(q, null, noDocs, Sort.INDEXORDER);
+          terms = q.getTotalNumberOfTerms();
+          break;
+        case 2:
+          type = " (filter)";
+          topDocs = searcher.search(new MatchAllDocsQuery(), f, noDocs, Sort.INDEXORDER);
+          terms = f.getTotalNumberOfTerms();
+          break;
+        default:
+          return;
+      }
+      if (VERBOSE) System.out.println("Found "+terms+" distinct terms in range for field '"+field+"'"+type+".");
+      ScoreDoc[] sd = topDocs.scoreDocs;
+      assertNotNull(sd);
+      assertEquals("Score doc count"+type, count, sd.length );
+      Document doc=searcher.doc(sd[0].doc);
+      assertEquals("First doc"+type, 2*distance+startOffset, Long.parseLong(doc.get(field)) );
+      doc=searcher.doc(sd[sd.length-1].doc);
+      assertEquals("Last doc"+type, (1+count)*distance+startOffset, Long.parseLong(doc.get(field)) );
+      if (i>0 && 
+          (searcher.getIndexReader().getSequentialSubReaders() == null || 
+           searcher.getIndexReader().getSequentialSubReaders().length == 1)) {
+        assertEquals("Distinct term number is equal for all query types", lastTerms, terms);
+      }
+      lastTerms = terms;
+    }
+  }
+
+  @Test
+  public void testRange_8bit() throws Exception {
+    testRange(8);
+  }
+  
+  @Test
+  public void testRange_6bit() throws Exception {
+    testRange(6);
+  }
+  
+  @Test
+  public void testRange_4bit() throws Exception {
+    testRange(4);
+  }
+  
+  @Test
+  public void testRange_2bit() throws Exception {
+    testRange(2);
+  }
+  
+  @Test
+  public void testInverseRange() throws Exception {
+    NumericRangeFilter<Long> f = NumericRangeFilter.newLongRange("field8", 8, 1000L, -1000L, true, true);
+    assertSame("A inverse range should return the EMPTY_DOCIDSET instance", DocIdSet.EMPTY_DOCIDSET, f.getDocIdSet(searcher.getIndexReader()));
+    f = NumericRangeFilter.newLongRange("field8", 8, Long.MAX_VALUE, null, false, false);
+    assertSame("A exclusive range starting with Long.MAX_VALUE should return the EMPTY_DOCIDSET instance",
+      DocIdSet.EMPTY_DOCIDSET, f.getDocIdSet(searcher.getIndexReader()));
+    f = NumericRangeFilter.newLongRange("field8", 8, null, Long.MIN_VALUE, false, false);
+    assertSame("A exclusive range ending with Long.MIN_VALUE should return the EMPTY_DOCIDSET instance",
+      DocIdSet.EMPTY_DOCIDSET, f.getDocIdSet(searcher.getIndexReader()));
+  }
+  
+  @Test
+  public void testOneMatchQuery() throws Exception {
+    NumericRangeQuery<Long> q = NumericRangeQuery.newLongRange("ascfield8", 8, 1000L, 1000L, true, true);
+    assertSame(MultiTermQuery.CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE, q.getRewriteMethod());
+    TopDocs topDocs = searcher.search(q, noDocs);
+    ScoreDoc[] sd = topDocs.scoreDocs;
+    assertNotNull(sd);
+    assertEquals("Score doc count", 1, sd.length );
+  }
+  
+  private void testLeftOpenRange(int precisionStep) throws Exception {
+    String field="field"+precisionStep;
+    int count=3000;
+    long upper=(count-1)*distance + (distance/3) + startOffset;
+    NumericRangeQuery<Long> q=NumericRangeQuery.newLongRange(field, precisionStep, null, upper, true, true);
+    TopDocs topDocs = searcher.search(q, null, noDocs, Sort.INDEXORDER);
+    if (VERBOSE) System.out.println("Found "+q.getTotalNumberOfTerms()+" distinct terms in left open range for field '"+field+"'.");
+    ScoreDoc[] sd = topDocs.scoreDocs;
+    assertNotNull(sd);
+    assertEquals("Score doc count", count, sd.length );
+    Document doc=searcher.doc(sd[0].doc);
+    assertEquals("First doc", startOffset, Long.parseLong(doc.get(field)) );
+    doc=searcher.doc(sd[sd.length-1].doc);
+    assertEquals("Last doc", (count-1)*distance+startOffset, Long.parseLong(doc.get(field)) );
+
+    q=NumericRangeQuery.newLongRange(field, precisionStep, null, upper, false, true);
+    topDocs = searcher.search(q, null, noDocs, Sort.INDEXORDER);
+    sd = topDocs.scoreDocs;
+    assertNotNull(sd);
+    assertEquals("Score doc count", count, sd.length );
+    doc=searcher.doc(sd[0].doc);
+    assertEquals("First doc", startOffset, Long.parseLong(doc.get(field)) );
+    doc=searcher.doc(sd[sd.length-1].doc);
+    assertEquals("Last doc", (count-1)*distance+startOffset, Long.parseLong(doc.get(field)) );
+  }
+  
+  @Test
+  public void testLeftOpenRange_8bit() throws Exception {
+    testLeftOpenRange(8);
+  }
+  
+  @Test
+  public void testLeftOpenRange_6bit() throws Exception {
+    testLeftOpenRange(6);
+  }
+  
+  @Test
+  public void testLeftOpenRange_4bit() throws Exception {
+    testLeftOpenRange(4);
+  }
+  
+  @Test
+  public void testLeftOpenRange_2bit() throws Exception {
+    testLeftOpenRange(2);
+  }
+  
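+  /** Tests right-open ranges (null upper bound): all documents from the lower bound to the last
+   *  document must be returned, and the inclusiveness flag of the missing bound must not change the result. */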
+  private void testRightOpenRange(int precisionStep) throws Exception {
+    String field="field"+precisionStep;
+    int count=3000;
+    long lower=(count-1)*distance + (distance/3) +startOffset;
+    NumericRangeQuery<Long> q=NumericRangeQuery.newLongRange(field, precisionStep, lower, null, true, true);
+    TopDocs topDocs = searcher.search(q, null, noDocs, Sort.INDEXORDER);
+    if (VERBOSE) System.out.println("Found "+q.getTotalNumberOfTerms()+" distinct terms in right open range for field '"+field+"'.");
+    ScoreDoc[] sd = topDocs.scoreDocs;
+    assertNotNull(sd);
+    assertEquals("Score doc count", noDocs-count, sd.length );
+    Document doc=searcher.doc(sd[0].doc);
+    assertEquals("First doc", count*distance+startOffset, Long.parseLong(doc.get(field)) );
+    doc=searcher.doc(sd[sd.length-1].doc);
+    assertEquals("Last doc", (noDocs-1)*distance+startOffset, Long.parseLong(doc.get(field)) );
+
+    q=NumericRangeQuery.newLongRange(field, precisionStep, lower, null, true, false);
+    topDocs = searcher.search(q, null, noDocs, Sort.INDEXORDER);
+    sd = topDocs.scoreDocs;
+    assertNotNull(sd);
+    assertEquals("Score doc count", noDocs-count, sd.length );
+    doc=searcher.doc(sd[0].doc);
+    assertEquals("First doc", count*distance+startOffset, Long.parseLong(doc.get(field)) );
+    doc=searcher.doc(sd[sd.length-1].doc);
+    assertEquals("Last doc", (noDocs-1)*distance+startOffset, Long.parseLong(doc.get(field)) );
+  }
+  
+  @Test
+  public void testRightOpenRange_8bit() throws Exception {
+    testRightOpenRange(8);
+  }
+  
+  @Test
+  public void testRightOpenRange_6bit() throws Exception {
+    testRightOpenRange(6);
+  }
+  
+  @Test
+  public void testRightOpenRange_4bit() throws Exception {
+    testRightOpenRange(4);
+  }
+  
+  @Test
+  public void testRightOpenRange_2bit() throws Exception {
+    testRightOpenRange(2);
+  }
+  
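+  /** Indexes Long.MIN_VALUE/MAX_VALUE and Double.NEGATIVE/POSITIVE_INFINITY and checks that
+   *  open-ended ranges match all three documents, while the exclusive MIN..MAX long range matches only one. */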
+  @Test
+  public void testInfiniteValues() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
+        TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    Document doc = new Document();
+    doc.add(new NumericField("double").setDoubleValue(Double.NEGATIVE_INFINITY));
+    doc.add(new NumericField("long").setLongValue(Long.MIN_VALUE));
+    writer.addDocument(doc);
+    
+    doc = new Document();
+    doc.add(new NumericField("double").setDoubleValue(Double.POSITIVE_INFINITY));
+    doc.add(new NumericField("long").setLongValue(Long.MAX_VALUE));
+    writer.addDocument(doc);
+    
+    doc = new Document();
+    doc.add(new NumericField("double").setDoubleValue(0.0));
+    doc.add(new NumericField("long").setLongValue(0L));
+    writer.addDocument(doc);
+    writer.close();
+    
+    IndexSearcher s = new IndexSearcher(dir);
+    
+    Query q=NumericRangeQuery.newLongRange("long", null, null, true, true);
+    TopDocs topDocs = s.search(q, 10);
+    assertEquals("Score doc count", 3,  topDocs.scoreDocs.length );
+    
+    q=NumericRangeQuery.newLongRange("long", null, null, false, false);
+    topDocs = s.search(q, 10);
+    assertEquals("Score doc count", 3,  topDocs.scoreDocs.length );
+
+    q=NumericRangeQuery.newLongRange("long", Long.MIN_VALUE, Long.MAX_VALUE, true, true);
+    topDocs = s.search(q, 10);
+    assertEquals("Score doc count", 3,  topDocs.scoreDocs.length );
+    
+    q=NumericRangeQuery.newLongRange("long", Long.MIN_VALUE, Long.MAX_VALUE, false, false);
+    topDocs = s.search(q, 10);
+    assertEquals("Score doc count", 1,  topDocs.scoreDocs.length );
+
+    q=NumericRangeQuery.newDoubleRange("double", null, null, true, true);
+    topDocs = s.search(q, 10);
+    assertEquals("Score doc count", 3,  topDocs.scoreDocs.length );
+
+    q=NumericRangeQuery.newDoubleRange("double", null, null, false, false);
+    topDocs = s.search(q, 10);
+    assertEquals("Score doc count", 3,  topDocs.scoreDocs.length );
+
+    s.close();
+    dir.close();
+  }
+  
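+  /** For random bounds, the hit count of a NumericRangeQuery must equal that of an equivalent
+   *  TermRangeQuery on the prefix-coded terms, for all four inclusive/exclusive combinations. */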
+  private void testRandomTrieAndClassicRangeQuery(int precisionStep) throws Exception {
+    String field="field"+precisionStep;
+    int termCountT=0,termCountC=0;
+    int num = _TestUtil.nextInt(random, 10, 20);
+    for (int i = 0; i < num; i++) {
+      long lower=(long)(random.nextDouble()*noDocs*distance)+startOffset;
+      long upper=(long)(random.nextDouble()*noDocs*distance)+startOffset;
+      if (lower>upper) {
+        long a=lower; lower=upper; upper=a;
+      }
+      // test inclusive range
+      NumericRangeQuery<Long> tq=NumericRangeQuery.newLongRange(field, precisionStep, lower, upper, true, true);
+      TermRangeQuery cq=new TermRangeQuery(field, NumericUtils.longToPrefixCoded(lower), NumericUtils.longToPrefixCoded(upper), true, true);
+      TopDocs tTopDocs = searcher.search(tq, 1);
+      TopDocs cTopDocs = searcher.search(cq, 1);
+      assertEquals("Returned count for NumericRangeQuery and TermRangeQuery must be equal", cTopDocs.totalHits, tTopDocs.totalHits );
+      termCountT += tq.getTotalNumberOfTerms();
+      termCountC += cq.getTotalNumberOfTerms();
+      // test exclusive range
+      tq=NumericRangeQuery.newLongRange(field, precisionStep, lower, upper, false, false);
+      cq=new TermRangeQuery(field, NumericUtils.longToPrefixCoded(lower), NumericUtils.longToPrefixCoded(upper), false, false);
+      tTopDocs = searcher.search(tq, 1);
+      cTopDocs = searcher.search(cq, 1);
+      assertEquals("Returned count for NumericRangeQuery and TermRangeQuery must be equal", cTopDocs.totalHits, tTopDocs.totalHits );
+      termCountT += tq.getTotalNumberOfTerms();
+      termCountC += cq.getTotalNumberOfTerms();
+      // test left exclusive range
+      tq=NumericRangeQuery.newLongRange(field, precisionStep, lower, upper, false, true);
+      cq=new TermRangeQuery(field, NumericUtils.longToPrefixCoded(lower), NumericUtils.longToPrefixCoded(upper), false, true);
+      tTopDocs = searcher.search(tq, 1);
+      cTopDocs = searcher.search(cq, 1);
+      assertEquals("Returned count for NumericRangeQuery and TermRangeQuery must be equal", cTopDocs.totalHits, tTopDocs.totalHits );
+      termCountT += tq.getTotalNumberOfTerms();
+      termCountC += cq.getTotalNumberOfTerms();
+      // test right exclusive range
+      tq=NumericRangeQuery.newLongRange(field, precisionStep, lower, upper, true, false);
+      cq=new TermRangeQuery(field, NumericUtils.longToPrefixCoded(lower), NumericUtils.longToPrefixCoded(upper), true, false);
+      tTopDocs = searcher.search(tq, 1);
+      cTopDocs = searcher.search(cq, 1);
+      assertEquals("Returned count for NumericRangeQuery and TermRangeQuery must be equal", cTopDocs.totalHits, tTopDocs.totalHits );
+      termCountT += tq.getTotalNumberOfTerms();
+      termCountC += cq.getTotalNumberOfTerms();
+    }
+    if (precisionStep == Integer.MAX_VALUE && 
+        (searcher.getIndexReader().getSequentialSubReaders() == null || 
+         searcher.getIndexReader().getSequentialSubReaders().length == 1)) {
+      assertEquals("Total number of terms should be equal for unlimited precStep", termCountT, termCountC);
+    } else if (VERBOSE) {
+      System.out.println("Average number of terms during random search on '" + field + "':");
+      System.out.println(" Trie query: " + (((double)termCountT)/(num * 4)));
+      System.out.println(" Classical query: " + (((double)termCountC)/(num * 4)));
+    }
+  }
+  
+  @Test
+  public void testRandomTrieAndClassicRangeQuery_8bit() throws Exception {
+    testRandomTrieAndClassicRangeQuery(8);
+  }
+  
+  @Test
+  public void testRandomTrieAndClassicRangeQuery_6bit() throws Exception {
+    testRandomTrieAndClassicRangeQuery(6);
+  }
+  
+  @Test
+  public void testRandomTrieAndClassicRangeQuery_4bit() throws Exception {
+    testRandomTrieAndClassicRangeQuery(4);
+  }
+  
+  @Test
+  public void testRandomTrieAndClassicRangeQuery_2bit() throws Exception {
+    testRandomTrieAndClassicRangeQuery(2);
+  }
+  
+  @Test
+  public void testRandomTrieAndClassicRangeQuery_NoTrie() throws Exception {
+    testRandomTrieAndClassicRangeQuery(Integer.MAX_VALUE);
+  }
+  
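+  /** On the ascending field, where documents carry consecutive values, the hit count of a range
+   *  query must equal the length of the requested range for all inclusive/exclusive combinations. */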
+  private void testRangeSplit(int precisionStep) throws Exception {
+    String field="ascfield"+precisionStep;
+    // 10 to 20 random tests
+    int num = _TestUtil.nextInt(random, 10, 20);
+    for (int i = 0; i < num; i++) {
+      long lower=(long)(random.nextDouble()*noDocs - noDocs/2);
+      long upper=(long)(random.nextDouble()*noDocs - noDocs/2);
+      if (lower>upper) {
+        long a=lower; lower=upper; upper=a;
+      }
+      // test inclusive range
+      Query tq=NumericRangeQuery.newLongRange(field, precisionStep, lower, upper, true, true);
+      TopDocs tTopDocs = searcher.search(tq, 1);
+      assertEquals("Returned count of range query must be equal to inclusive range length", upper-lower+1, tTopDocs.totalHits );
+      // test exclusive range
+      tq=NumericRangeQuery.newLongRange(field, precisionStep, lower, upper, false, false);
+      tTopDocs = searcher.search(tq, 1);
+      assertEquals("Returned count of range query must be equal to exclusive range length", Math.max(upper-lower-1, 0), tTopDocs.totalHits );
+      // test left exclusive range
+      tq=NumericRangeQuery.newLongRange(field, precisionStep, lower, upper, false, true);
+      tTopDocs = searcher.search(tq, 1);
+      assertEquals("Returned count of range query must be equal to half exclusive range length", upper-lower, tTopDocs.totalHits );
+      // test right exclusive range
+      tq=NumericRangeQuery.newLongRange(field, precisionStep, lower, upper, true, false);
+      tTopDocs = searcher.search(tq, 1);
+      assertEquals("Returned count of range query must be equal to half exclusive range length", upper-lower, tTopDocs.totalHits );
+    }
+  }
+
+  @Test
+  public void testRangeSplit_8bit() throws Exception {
+    testRangeSplit(8);
+  }
+  
+  @Test
+  public void testRangeSplit_6bit() throws Exception {
+    testRangeSplit(6);
+  }
+  
+  @Test
+  public void testRangeSplit_4bit() throws Exception {
+    testRangeSplit(4);
+  }
+  
+  @Test
+  public void testRangeSplit_2bit() throws Exception {
+    testRangeSplit(2);
+  }
+  
+  /** We fake a double range test using the sortable long-to-double conversion of NumericUtils. */
+  private void testDoubleRange(int precisionStep) throws Exception {
+    final String field="ascfield"+precisionStep;
+    final long lower=-1000L, upper=+2000L;
+    
+    Query tq=NumericRangeQuery.newDoubleRange(field, precisionStep,
+      NumericUtils.sortableLongToDouble(lower), NumericUtils.sortableLongToDouble(upper), true, true);
+    TopDocs tTopDocs = searcher.search(tq, 1);
+    assertEquals("Returned count of range query must be equal to inclusive range length", upper-lower+1, tTopDocs.totalHits );
+    
+    Filter tf=NumericRangeFilter.newDoubleRange(field, precisionStep,
+      NumericUtils.sortableLongToDouble(lower), NumericUtils.sortableLongToDouble(upper), true, true);
+    tTopDocs = searcher.search(new MatchAllDocsQuery(), tf, 1);
+    assertEquals("Returned count of range filter must be equal to inclusive range length", upper-lower+1, tTopDocs.totalHits );
+  }
+
+  @Test
+  public void testDoubleRange_8bit() throws Exception {
+    testDoubleRange(8);
+  }
+  
+  @Test
+  public void testDoubleRange_6bit() throws Exception {
+    testDoubleRange(6);
+  }
+  
+  @Test
+  public void testDoubleRange_4bit() throws Exception {
+    testDoubleRange(4);
+  }
+  
+  @Test
+  public void testDoubleRange_2bit() throws Exception {
+    testDoubleRange(2);
+  }
+  
+  private void testSorting(int precisionStep) throws Exception {
+    String field="field"+precisionStep;
+    // 10 to 20 random tests; the index order is ascending,
+    // so using a reverse sort field should return descending documents
+    int num = _TestUtil.nextInt(random, 10, 20);
+    for (int i = 0; i < num; i++) {
+      long lower=(long)(random.nextDouble()*noDocs*distance)+startOffset;
+      long upper=(long)(random.nextDouble()*noDocs*distance)+startOffset;
+      if (lower>upper) {
+        long a=lower; lower=upper; upper=a;
+      }
+      Query tq=NumericRangeQuery.newLongRange(field, precisionStep, lower, upper, true, true);
+      TopDocs topDocs = searcher.search(tq, null, noDocs, new Sort(new SortField(field, SortField.LONG, true)));
+      if (topDocs.totalHits==0) continue;
+      ScoreDoc[] sd = topDocs.scoreDocs;
+      assertNotNull(sd);
+      long last=Long.parseLong(searcher.doc(sd[0].doc).get(field));
+      for (int j=1; j<sd.length; j++) {
+        long act=Long.parseLong(searcher.doc(sd[j].doc).get(field));
+        assertTrue("Docs should be sorted backwards", last>act );
+        last=act;
+      }
+    }
+  }
+
+  @Test
+  public void testSorting_8bit() throws Exception {
+    testSorting(8);
+  }
+  
+  @Test
+  public void testSorting_6bit() throws Exception {
+    testSorting(6);
+  }
+  
+  @Test
+  public void testSorting_4bit() throws Exception {
+    testSorting(4);
+  }
+  
+  @Test
+  public void testSorting_2bit() throws Exception {
+    testSorting(2);
+  }
+  
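+  /** Checks the equals/hashCode contract of NumericRangeQuery: queries differing in field name,
+   *  bounds, precision step, inclusiveness, or numeric type must not be equal. */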
+  @Test
+  public void testEqualsAndHash() throws Exception {
+    QueryUtils.checkHashEquals(NumericRangeQuery.newLongRange("test1", 4, 10L, 20L, true, true));
+    QueryUtils.checkHashEquals(NumericRangeQuery.newLongRange("test2", 4, 10L, 20L, false, true));
+    QueryUtils.checkHashEquals(NumericRangeQuery.newLongRange("test3", 4, 10L, 20L, true, false));
+    QueryUtils.checkHashEquals(NumericRangeQuery.newLongRange("test4", 4, 10L, 20L, false, false));
+    QueryUtils.checkHashEquals(NumericRangeQuery.newLongRange("test5", 4, 10L, null, true, true));
+    QueryUtils.checkHashEquals(NumericRangeQuery.newLongRange("test6", 4, null, 20L, true, true));
+    QueryUtils.checkHashEquals(NumericRangeQuery.newLongRange("test7", 4, null, null, true, true));
+    QueryUtils.checkEqual(
+      NumericRangeQuery.newLongRange("test8", 4, 10L, 20L, true, true), 
+      NumericRangeQuery.newLongRange("test8", 4, 10L, 20L, true, true)
+    );
+    QueryUtils.checkUnequal(
+      NumericRangeQuery.newLongRange("test9", 4, 10L, 20L, true, true), 
+      NumericRangeQuery.newLongRange("test9", 8, 10L, 20L, true, true)
+    );
+    QueryUtils.checkUnequal(
+      NumericRangeQuery.newLongRange("test10a", 4, 10L, 20L, true, true), 
+      NumericRangeQuery.newLongRange("test10b", 4, 10L, 20L, true, true)
+    );
+    QueryUtils.checkUnequal(
+      NumericRangeQuery.newLongRange("test11", 4, 10L, 20L, true, true), 
+      NumericRangeQuery.newLongRange("test11", 4, 20L, 10L, true, true)
+    );
+    QueryUtils.checkUnequal(
+      NumericRangeQuery.newLongRange("test12", 4, 10L, 20L, true, true), 
+      NumericRangeQuery.newLongRange("test12", 4, 10L, 20L, false, true)
+    );
+    QueryUtils.checkUnequal(
+      NumericRangeQuery.newLongRange("test13", 4, 10L, 20L, true, true), 
+      NumericRangeQuery.newFloatRange("test13", 4, 10f, 20f, true, true)
+    );
+     // difference to int range is tested in TestNumericRangeQuery32
+  }
+  
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/TestParallelMultiSearcher.java b/lucene/backwards/src/test/org/apache/lucene/search/TestParallelMultiSearcher.java
new file mode 100644
index 0000000..44e35ca
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/TestParallelMultiSearcher.java
@@ -0,0 +1,51 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+ 
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.lucene.util._TestUtil;
+
+/**
+ * Unit tests for the ParallelMultiSearcher 
+ */
+public class TestParallelMultiSearcher extends TestMultiSearcher {
+  List<ExecutorService> pools = new ArrayList<ExecutorService>();
+
+  @Override
+  public void tearDown() throws Exception {
+    for (ExecutorService exec : pools)
+      exec.awaitTermination(1000, TimeUnit.MILLISECONDS);
+    pools.clear();
+    super.tearDown();
+  }
+
+  @Override
+  protected MultiSearcher getMultiSearcherInstance(Searcher[] searchers)
+    throws IOException {
+    ExecutorService exec = Executors.newFixedThreadPool(_TestUtil.nextInt(random, 2, 8));
+    pools.add(exec);
+    return new ParallelMultiSearcher(exec, searchers);
+  }
+
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/TestPhrasePrefixQuery.java b/lucene/backwards/src/test/org/apache/lucene/search/TestPhrasePrefixQuery.java
new file mode 100644
index 0000000..7b0da44
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/TestPhrasePrefixQuery.java
@@ -0,0 +1,101 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.util.LuceneTestCase;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.TermEnum;
+import org.apache.lucene.store.Directory;
+
+import java.io.IOException;
+import java.util.LinkedList;
+
+/**
+ * This class tests the MultiPhraseQuery class (formerly PhrasePrefixQuery).
+ */
+public class TestPhrasePrefixQuery extends LuceneTestCase {
+  
+  /**
+   * Verifies that prefix-expanded terms added to a MultiPhraseQuery match the expected documents.
+   */
+  public void testPhrasePrefix() throws IOException {
+    Directory indexStore = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random, indexStore);
+    Document doc1 = new Document();
+    Document doc2 = new Document();
+    Document doc3 = new Document();
+    Document doc4 = new Document();
+    Document doc5 = new Document();
+    doc1.add(newField("body", "blueberry pie", Field.Store.YES,
+        Field.Index.ANALYZED));
+    doc2.add(newField("body", "blueberry strudel", Field.Store.YES,
+        Field.Index.ANALYZED));
+    doc3.add(newField("body", "blueberry pizza", Field.Store.YES,
+        Field.Index.ANALYZED));
+    doc4.add(newField("body", "blueberry chewing gum", Field.Store.YES,
+        Field.Index.ANALYZED));
+    doc5.add(newField("body", "piccadilly circus", Field.Store.YES,
+        Field.Index.ANALYZED));
+    writer.addDocument(doc1);
+    writer.addDocument(doc2);
+    writer.addDocument(doc3);
+    writer.addDocument(doc4);
+    writer.addDocument(doc5);
+    IndexReader reader = writer.getReader();
+    writer.close();
+    
+    IndexSearcher searcher = newSearcher(reader);
+    
+    // PhrasePrefixQuery query1 = new PhrasePrefixQuery();
+    MultiPhraseQuery query1 = new MultiPhraseQuery();
+    // PhrasePrefixQuery query2 = new PhrasePrefixQuery();
+    MultiPhraseQuery query2 = new MultiPhraseQuery();
+    query1.add(new Term("body", "blueberry"));
+    query2.add(new Term("body", "strawberry"));
+    
+    LinkedList<Term> termsWithPrefix = new LinkedList<Term>();
+    
+    // this TermEnum gives "piccadilly", "pie" and "pizza".
+    String prefix = "pi";
+    TermEnum te = reader.terms(new Term("body", prefix + "*"));
+    do {
+      if (te.term().text().startsWith(prefix)) {
+        termsWithPrefix.add(te.term());
+      }
+    } while (te.next());
+    
+    query1.add(termsWithPrefix.toArray(new Term[0]));
+    query2.add(termsWithPrefix.toArray(new Term[0]));
+    
+    ScoreDoc[] result;
+    result = searcher.search(query1, null, 1000).scoreDocs;
+    assertEquals(2, result.length);
+    
+    result = searcher.search(query2, null, 1000).scoreDocs;
+    assertEquals(0, result.length);
+    searcher.close();
+    reader.close();
+    indexStore.close();
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/TestPhraseQuery.java b/lucene/backwards/src/test/org/apache/lucene/search/TestPhraseQuery.java
new file mode 100644
index 0000000..0d07f20
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/TestPhraseQuery.java
@@ -0,0 +1,697 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.analysis.*;
+import org.apache.lucene.analysis.tokenattributes.*;
+import org.apache.lucene.document.*;
+import org.apache.lucene.index.*;
+import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.queryParser.QueryParser;
+import org.apache.lucene.store.*;
+import org.apache.lucene.util.Version;
+import org.apache.lucene.util._TestUtil;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+
+import java.io.IOException;
+import java.io.Reader;
+import java.io.StringReader;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Random;
+
+/**
+ * Tests {@link PhraseQuery}.
+ *
+ * @see TestPositionIncrement
+ */
+public class TestPhraseQuery extends LuceneTestCase {
+
+  /** threshold for comparing floats */
+  public static final float SCORE_COMP_THRESH = 1e-6f;
+  
+  private static IndexSearcher searcher;
+  private static IndexReader reader;
+  private PhraseQuery query;
+  private static Directory directory;
+
+  @BeforeClass
+  public static void beforeClass() throws Exception {
+    directory = newDirectory();
+    Analyzer analyzer = new Analyzer() {
+      @Override
+      public TokenStream tokenStream(String fieldName, Reader reader) {
+        return new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader);
+      }
+
+      @Override
+      public int getPositionIncrementGap(String fieldName) {
+        return 100;
+      }
+    };
+    RandomIndexWriter writer = new RandomIndexWriter(random, directory, analyzer);
+    
+    Document doc = new Document();
+    doc.add(newField("field", "one two three four five", Field.Store.YES, Field.Index.ANALYZED));
+    doc.add(newField("repeated", "this is a repeated field - first part", Field.Store.YES, Field.Index.ANALYZED));
+    Fieldable repeatedField = newField("repeated", "second part of a repeated field", Field.Store.YES, Field.Index.ANALYZED);
+    doc.add(repeatedField);
+    doc.add(newField("palindrome", "one two three two one", Field.Store.YES, Field.Index.ANALYZED));
+    writer.addDocument(doc);
+    
+    doc = new Document();
+    doc.add(newField("nonexist", "phrase exist notexist exist found", Field.Store.YES, Field.Index.ANALYZED));
+    writer.addDocument(doc);
+    
+    doc = new Document();
+    doc.add(newField("nonexist", "phrase exist notexist exist found", Field.Store.YES, Field.Index.ANALYZED));
+    writer.addDocument(doc);
+
+    reader = writer.getReader();
+    writer.close();
+
+    searcher = newSearcher(reader);
+  }
+  
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    query = new PhraseQuery();
+  }
+
+  @AfterClass
+  public static void afterClass() throws Exception {
+    searcher.close();
+    searcher = null;
+    reader.close();
+    reader = null;
+    directory.close();
+    directory = null;
+  }
+
+  public void testNotCloseEnough() throws Exception {
+    query.setSlop(2);
+    query.add(new Term("field", "one"));
+    query.add(new Term("field", "five"));
+    ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals(0, hits.length);
+    QueryUtils.check(random, query,searcher);
+  }
+
+  public void testBarelyCloseEnough() throws Exception {
+    query.setSlop(3);
+    query.add(new Term("field", "one"));
+    query.add(new Term("field", "five"));
+    ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals(1, hits.length);
+    QueryUtils.check(random, query,searcher);
+  }
+
+  /**
+   * Ensures slop of 0 works for exact matches, but not reversed
+   */
+  public void testExact() throws Exception {
+    // slop is zero by default
+    query.add(new Term("field", "four"));
+    query.add(new Term("field", "five"));
+    ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals("exact match", 1, hits.length);
+    QueryUtils.check(random, query,searcher);
+
+
+    query = new PhraseQuery();
+    query.add(new Term("field", "two"));
+    query.add(new Term("field", "one"));
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals("reverse not exact", 0, hits.length);
+    QueryUtils.check(random, query,searcher);
+  }
+
+  public void testSlop1() throws Exception {
+    // Ensures slop of 1 works with terms in order.
+    query.setSlop(1);
+    query.add(new Term("field", "one"));
+    query.add(new Term("field", "two"));
+    ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals("in order", 1, hits.length);
+    QueryUtils.check(random, query,searcher);
+
+
+    // Ensures slop of 1 does not work for phrases out of order;
+    // must be at least 2.
+    query = new PhraseQuery();
+    query.setSlop(1);
+    query.add(new Term("field", "two"));
+    query.add(new Term("field", "one"));
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals("reversed, slop not 2 or more", 0, hits.length);
+    QueryUtils.check(random, query,searcher);
+  }
+
+  /**
+   * As long as slop is at least 2, terms can be reversed
+   */
+  public void testOrderDoesntMatter() throws Exception {
+    query.setSlop(2); // must be at least two for reverse order match
+    query.add(new Term("field", "two"));
+    query.add(new Term("field", "one"));
+    ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals("just sloppy enough", 1, hits.length);
+    QueryUtils.check(random, query,searcher);
+
+
+    query = new PhraseQuery();
+    query.setSlop(2);
+    query.add(new Term("field", "three"));
+    query.add(new Term("field", "one"));
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals("not sloppy enough", 0, hits.length);
+    QueryUtils.check(random, query,searcher);
+
+  }
+
+  /**
+   * slop is the total number of positional moves allowed
+   * to line up a phrase
+   */
+  public void testMulipleTerms() throws Exception {
+    query.setSlop(2);
+    query.add(new Term("field", "one"));
+    query.add(new Term("field", "three"));
+    query.add(new Term("field", "five"));
+    ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals("two total moves", 1, hits.length);
+    QueryUtils.check(random, query,searcher);
+
+
+    query = new PhraseQuery();
+    query.setSlop(5); // it takes six moves to match this phrase
+    query.add(new Term("field", "five"));
+    query.add(new Term("field", "three"));
+    query.add(new Term("field", "one"));
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals("slop of 5 not close enough", 0, hits.length);
+    QueryUtils.check(random, query,searcher);
+
+
+    query.setSlop(6);
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals("slop of 6 just right", 1, hits.length);
+    QueryUtils.check(random, query,searcher);
+
+  }
+  
+  public void testPhraseQueryWithStopAnalyzer() throws Exception {
+    Directory directory = newDirectory();
+    StopAnalyzer stopAnalyzer = new StopAnalyzer(Version.LUCENE_24);
+    RandomIndexWriter writer = new RandomIndexWriter(random, directory, 
+        newIndexWriterConfig( Version.LUCENE_24, stopAnalyzer));
+    Document doc = new Document();
+    doc.add(newField("field", "the stop words are here", Field.Store.YES, Field.Index.ANALYZED));
+    writer.addDocument(doc);
+    IndexReader reader = writer.getReader();
+    writer.close();
+
+    IndexSearcher searcher = newSearcher(reader);
+
+    // valid exact phrase query
+    PhraseQuery query = new PhraseQuery();
+    query.add(new Term("field","stop"));
+    query.add(new Term("field","words"));
+    ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals(1, hits.length);
+    QueryUtils.check(random, query,searcher);
+
+
+    // StopAnalyzer as of 2.4 does not leave "holes", so this matches.
+    query = new PhraseQuery();
+    query.add(new Term("field", "words"));
+    query.add(new Term("field", "here"));
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals(1, hits.length);
+    QueryUtils.check(random, query,searcher);
+
+
+    searcher.close();
+    reader.close();
+    directory.close();
+  }
+  
+  public void testPhraseQueryInConjunctionScorer() throws Exception {
+    Directory directory = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random, directory);
+    
+    Document doc = new Document();
+    doc.add(newField("source", "marketing info", Field.Store.YES, Field.Index.ANALYZED));
+    writer.addDocument(doc);
+    
+    doc = new Document();
+    doc.add(newField("contents", "foobar", Field.Store.YES, Field.Index.ANALYZED));
+    doc.add(newField("source", "marketing info", Field.Store.YES, Field.Index.ANALYZED)); 
+    writer.addDocument(doc);
+    
+    IndexReader reader = writer.getReader();
+    writer.close();
+    
+    IndexSearcher searcher = newSearcher(reader);
+    
+    PhraseQuery phraseQuery = new PhraseQuery();
+    phraseQuery.add(new Term("source", "marketing"));
+    phraseQuery.add(new Term("source", "info"));
+    ScoreDoc[] hits = searcher.search(phraseQuery, null, 1000).scoreDocs;
+    assertEquals(2, hits.length);
+    QueryUtils.check(random, phraseQuery,searcher);
+
+    
+    TermQuery termQuery = new TermQuery(new Term("contents","foobar"));
+    BooleanQuery booleanQuery = new BooleanQuery();
+    booleanQuery.add(termQuery, BooleanClause.Occur.MUST);
+    booleanQuery.add(phraseQuery, BooleanClause.Occur.MUST);
+    hits = searcher.search(booleanQuery, null, 1000).scoreDocs;
+    assertEquals(1, hits.length);
+    QueryUtils.check(random, termQuery,searcher);
+
+    
+    searcher.close();
+    reader.close();
+    
+    writer = new RandomIndexWriter(random, directory, 
+        newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE));
+    doc = new Document();
+    doc.add(newField("contents", "map entry woo", Field.Store.YES, Field.Index.ANALYZED));
+    writer.addDocument(doc);
+
+    doc = new Document();
+    doc.add(newField("contents", "woo map entry", Field.Store.YES, Field.Index.ANALYZED));
+    writer.addDocument(doc);
+
+    doc = new Document();
+    doc.add(newField("contents", "map foobarword entry woo", Field.Store.YES, Field.Index.ANALYZED));
+    writer.addDocument(doc);
+
+    reader = writer.getReader();
+    writer.close();
+    
+    searcher = newSearcher(reader);
+    
+    termQuery = new TermQuery(new Term("contents","woo"));
+    phraseQuery = new PhraseQuery();
+    phraseQuery.add(new Term("contents","map"));
+    phraseQuery.add(new Term("contents","entry"));
+    
+    hits = searcher.search(termQuery, null, 1000).scoreDocs;
+    assertEquals(3, hits.length);
+    hits = searcher.search(phraseQuery, null, 1000).scoreDocs;
+    assertEquals(2, hits.length);
+
+    
+    booleanQuery = new BooleanQuery();
+    booleanQuery.add(termQuery, BooleanClause.Occur.MUST);
+    booleanQuery.add(phraseQuery, BooleanClause.Occur.MUST);
+    hits = searcher.search(booleanQuery, null, 1000).scoreDocs;
+    assertEquals(2, hits.length);
+    
+    booleanQuery = new BooleanQuery();
+    booleanQuery.add(phraseQuery, BooleanClause.Occur.MUST);
+    booleanQuery.add(termQuery, BooleanClause.Occur.MUST);
+    hits = searcher.search(booleanQuery, null, 1000).scoreDocs;
+    assertEquals(2, hits.length);
+    QueryUtils.check(random, booleanQuery,searcher);
+
+    
+    searcher.close();
+    reader.close();
+    directory.close();
+  }
+  
+  public void testSlopScoring() throws IOException {
+    Directory directory = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random, directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
+
+    Document doc = new Document();
+    doc.add(newField("field", "foo firstname lastname foo", Field.Store.YES, Field.Index.ANALYZED));
+    writer.addDocument(doc);
+    
+    Document doc2 = new Document();
+    doc2.add(newField("field", "foo firstname zzz lastname foo", Field.Store.YES, Field.Index.ANALYZED));
+    writer.addDocument(doc2);
+    
+    Document doc3 = new Document();
+    doc3.add(newField("field", "foo firstname zzz yyy lastname foo", Field.Store.YES, Field.Index.ANALYZED));
+    writer.addDocument(doc3);
+    
+    IndexReader reader = writer.getReader();
+    writer.close();
+
+    IndexSearcher searcher = newSearcher(reader);
+    PhraseQuery query = new PhraseQuery();
+    query.add(new Term("field", "firstname"));
+    query.add(new Term("field", "lastname"));
+    query.setSlop(Integer.MAX_VALUE);
+    ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals(3, hits.length);
+    // Make sure that those matches where the terms appear closer to
+    // each other get a higher score:
+    assertEquals(0.71, hits[0].score, 0.01);
+    assertEquals(0, hits[0].doc);
+    assertEquals(0.44, hits[1].score, 0.01);
+    assertEquals(1, hits[1].doc);
+    assertEquals(0.31, hits[2].score, 0.01);
+    assertEquals(2, hits[2].doc);
+    QueryUtils.check(random, query,searcher);
+    searcher.close();
+    reader.close();
+    directory.close();
+  }
+  
+  public void testToString() throws Exception {
+    StopAnalyzer analyzer = new StopAnalyzer(TEST_VERSION_CURRENT);
+    QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", analyzer);
+    qp.setEnablePositionIncrements(true);
+    PhraseQuery q = (PhraseQuery)qp.parse("\"this hi this is a test is\"");
+    assertEquals("field:\"? hi ? ? ? test\"", q.toString());
+    q.add(new Term("field", "hello"), 1);
+    assertEquals("field:\"? hi|hello ? ? ? test\"", q.toString());
+  }
+
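+  // The "repeated" field is indexed as two parts separated by a position increment gap of 100,
+  // so this wrap-around phrase matches with a slop of 100 but not with 99.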
+  public void testWrappedPhrase() throws IOException {
+    query.add(new Term("repeated", "first"));
+    query.add(new Term("repeated", "part"));
+    query.add(new Term("repeated", "second"));
+    query.add(new Term("repeated", "part"));
+    query.setSlop(100);
+
+    ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals("slop of 100 just right", 1, hits.length);
+    QueryUtils.check(random, query,searcher);
+
+    query.setSlop(99);
+
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals("slop of 99 not enough", 0, hits.length);
+    QueryUtils.check(random, query,searcher);
+  }
+
+  // work on two docs like this: "phrase exist notexist exist found"
+  public void testNonExistingPhrase() throws IOException {
+    // phrase without repetitions that exists in 2 docs
+    query.add(new Term("nonexist", "phrase"));
+    query.add(new Term("nonexist", "notexist"));
+    query.add(new Term("nonexist", "found"));
+    query.setSlop(2); // would be found this way
+
+    ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals("phrase without repetitions exists in 2 docs", 2, hits.length);
+    QueryUtils.check(random, query,searcher);
+
+    // phrase with repetitions that exists in 2 docs
+    query = new PhraseQuery();
+    query.add(new Term("nonexist", "phrase"));
+    query.add(new Term("nonexist", "exist"));
+    query.add(new Term("nonexist", "exist"));
+    query.setSlop(1); // would be found 
+
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals("phrase with repetitions exists in two docs", 2, hits.length);
+    QueryUtils.check(random, query,searcher);
+
+    // phrase I with repetitions that does not exist in any doc
+    query = new PhraseQuery();
+    query.add(new Term("nonexist", "phrase"));
+    query.add(new Term("nonexist", "notexist"));
+    query.add(new Term("nonexist", "phrase"));
+    query.setSlop(1000); // would not be found no matter how high the slop is
+
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals("nonexisting phrase with repetitions does not exist in any doc", 0, hits.length);
+    QueryUtils.check(random, query,searcher);
+
+    // phrase II with repetitions that does not exist in any doc
+    query = new PhraseQuery();
+    query.add(new Term("nonexist", "phrase"));
+    query.add(new Term("nonexist", "exist"));
+    query.add(new Term("nonexist", "exist"));
+    query.add(new Term("nonexist", "exist"));
+    query.setSlop(1000); // would not be found no matter how high the slop is
+
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals("nonexisting phrase with repetitions does not exist in any doc", 0, hits.length);
+    QueryUtils.check(random, query,searcher);
+
+  }
+
+  /**
+   * Working on two fields like this:
+   *    Field("field", "one two three four five")
+   *    Field("palindrome", "one two three two one")
+   * A phrase of size 2 occurring twice, once in order and once in reverse
+   * (because the doc is a palindrome), is counted twice.
+   * Also, in this case the order in the query does not matter.
+   * Also, when an exact match is found, the sloppy scorer and the exact scorer score the same.
+   */
+  public void testPalyndrome2() throws Exception {
+    
+    // search on non-palindrome field, find phrase with no slop, using exact phrase scorer
+    query.setSlop(0); // to use exact phrase scorer
+    query.add(new Term("field", "two"));
+    query.add(new Term("field", "three"));
+    ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals("phrase found with exact phrase scorer", 1, hits.length);
+    float score0 = hits[0].score;
+    //System.out.println("(exact) field: two three: "+score0);
+    QueryUtils.check(random, query,searcher);
+
+    // search on non-palindrome field, find phrase with slop 2, though no slop is required here.
+    query.setSlop(2); // to use sloppy scorer 
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals("just sloppy enough", 1, hits.length);
+    float score1 = hits[0].score;
+    //System.out.println("(sloppy) field: two three: "+score1);
+    assertEquals("exact scorer and sloppy scorer score the same when slop does not matter",score0, score1, SCORE_COMP_THRESH);
+    QueryUtils.check(random, query,searcher);
+
+    // search ordered in palindrome field, find it twice
+    query = new PhraseQuery();
+    query.setSlop(2); // must be at least two for both ordered and reversed to match
+    query.add(new Term("palindrome", "two"));
+    query.add(new Term("palindrome", "three"));
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals("just sloppy enough", 1, hits.length);
+    //float score2 = hits[0].score;
+    //System.out.println("palindrome: two three: "+score2);
+    QueryUtils.check(random, query,searcher);
+    
+    //commented out for sloppy-phrase efficiency (issue 736) - see SloppyPhraseScorer.phraseFreq(). 
+    //assertTrue("ordered scores higher in palindrome",score1+SCORE_COMP_THRESH<score2);
+
+    // search reversed in palindrome field, find it twice
+    query = new PhraseQuery();
+    query.setSlop(2); // must be at least two for both ordered and reversed to match
+    query.add(new Term("palindrome", "three"));
+    query.add(new Term("palindrome", "two"));
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals("just sloppy enough", 1, hits.length);
+    //float score3 = hits[0].score;
+    //System.out.println("palindrome: three two: "+score3);
+    QueryUtils.check(random, query,searcher);
+
+    //commented out for sloppy-phrase efficiency (issue 736) - see SloppyPhraseScorer.phraseFreq(). 
+    //assertTrue("reversed scores higher in palindrome",score1+SCORE_COMP_THRESH<score3);
+    //assertEquals("ordered or reversed does not matter",score2, score3, SCORE_COMP_THRESH);
+  }
+
+  /**
+   * Working on two fields like this:
+   *    Field("field", "one two three four five")
+   *    Field("palindrome", "one two three two one")
+   * A phrase of size 3 occurring twice, once in order and once in reverse
+   * (because the doc is a palindrome), is counted twice.
+   * Also, in this case the order in the query does not matter.
+   * Also, when an exact match is found, the sloppy scorer and the exact scorer score the same.
+   */
+  public void testPalyndrome3() throws Exception {
+    
+    // search on non-palindrome field, find phrase with no slop, using exact phrase scorer
+    query.setSlop(0); // to use exact phrase scorer
+    query.add(new Term("field", "one"));
+    query.add(new Term("field", "two"));
+    query.add(new Term("field", "three"));
+    ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals("phrase found with exact phrase scorer", 1, hits.length);
+    float score0 = hits[0].score;
+    //System.out.println("(exact) field: one two three: "+score0);
+    QueryUtils.check(random, query,searcher);
+
+    // just make sure no exc:
+    searcher.explain(query, 0);
+
+    // search on non-palindrome field, find phrase with slop 4, though no slop is required here.
+    query.setSlop(4); // to use sloppy scorer 
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals("just sloppy enough", 1, hits.length);
+    float score1 = hits[0].score;
+    //System.out.println("(sloppy) field: one two three: "+score1);
+    assertEquals("exact scorer and sloppy scorer score the same when slop does not matter",score0, score1, SCORE_COMP_THRESH);
+    QueryUtils.check(random, query,searcher);
+
+    // search ordered in palindrome field, find it twice
+    query = new PhraseQuery();
+    query.setSlop(4); // must be at least four for both ordered and reversed to match
+    query.add(new Term("palindrome", "one"));
+    query.add(new Term("palindrome", "two"));
+    query.add(new Term("palindrome", "three"));
+    hits = searcher.search(query, null, 1000).scoreDocs;
+
+    // just make sure no exc:
+    searcher.explain(query, 0);
+
+    assertEquals("just sloppy enough", 1, hits.length);
+    //float score2 = hits[0].score;
+    //System.out.println("palindrome: one two three: "+score2);
+    QueryUtils.check(random, query,searcher);
+    
+    //commented out for sloppy-phrase efficiency (issue 736) - see SloppyPhraseScorer.phraseFreq(). 
+    //assertTrue("ordered scores higher in palindrome",score1+SCORE_COMP_THRESH<score2);
+
+    // search reversed in palindrome field, find it twice
+    query = new PhraseQuery();
+    query.setSlop(4); // must be at least four for both ordered and reversed to match
+    query.add(new Term("palindrome", "three"));
+    query.add(new Term("palindrome", "two"));
+    query.add(new Term("palindrome", "one"));
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals("just sloppy enough", 1, hits.length);
+    //float score3 = hits[0].score;
+    //System.out.println("palindrome: three two one: "+score3);
+    QueryUtils.check(random, query,searcher);
+
+    //commented out for sloppy-phrase efficiency (issue 736) - see SloppyPhraseScorer.phraseFreq(). 
+    //assertTrue("reversed scores higher in palindrome",score1+SCORE_COMP_THRESH<score3);
+    //assertEquals("ordered or reversed does not matter",score2, score3, SCORE_COMP_THRESH);
+  }
+
+  // LUCENE-1280
+  public void testEmptyPhraseQuery() throws Throwable {
+    final BooleanQuery q2 = new BooleanQuery();
+    q2.add(new PhraseQuery(), BooleanClause.Occur.MUST);
+    q2.toString();
+  }
+  
+  /* test that a single term is rewritten to a term query */
+  public void testRewrite() throws IOException {
+    PhraseQuery pq = new PhraseQuery();
+    pq.add(new Term("foo", "bar"));
+    Query rewritten = pq.rewrite(searcher.getIndexReader());
+    assertTrue(rewritten instanceof TermQuery);
+  }
+
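+  // Indexes random documents of more than 4096 terms each and verifies that exact phrases
+  // sampled from a document are found again in that document.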
+  public void testRandomPhrases() throws Exception {
+    Directory dir = newDirectory();
+    Analyzer analyzer = new MockAnalyzer(random);
+
+    RandomIndexWriter w  = new RandomIndexWriter(random, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer).setMergePolicy(newLogMergePolicy()));
+    List<List<String>> docs = new ArrayList<List<String>>();
+    Document d = new Document();
+    Field f = newField("f", "", Field.Store.NO, Field.Index.ANALYZED);
+    d.add(f);
+
+    Random r = random;
+
+    int NUM_DOCS = atLeast(10);
+    for (int i = 0; i < NUM_DOCS; i++) {
+      // must be > 4096 so it spans multiple chunks
+      int termCount = _TestUtil.nextInt(random, 4097, 8200);
+
+      List<String> doc = new ArrayList<String>();
+
+      StringBuilder sb = new StringBuilder();
+      while(doc.size() < termCount) {
+        if (r.nextInt(5) == 1 || docs.size() == 0) {
+          // make new non-empty-string term
+          String term;
+          while(true) {
+            term = _TestUtil.randomUnicodeString(r);
+            if (term.length() > 0) {
+              break;
+            }
+          }
+          TokenStream ts = analyzer.reusableTokenStream("ignore", new StringReader(term));
+          CharTermAttribute termAttr = ts.addAttribute(CharTermAttribute.class);
+          ts.reset();
+          while(ts.incrementToken()) {
+            String text = termAttr.toString();
+            doc.add(text);
+            sb.append(text).append(' ');
+          }
+          ts.end();
+          ts.close();
+        } else {
+          // pick existing sub-phrase
+          List<String> lastDoc = docs.get(r.nextInt(docs.size()));
+          int len = _TestUtil.nextInt(r, 1, 10);
+          int start = r.nextInt(lastDoc.size()-len);
+          for(int k=start;k<start+len;k++) {
+            String t = lastDoc.get(k);
+            doc.add(t);
+            sb.append(t).append(' ');
+          }
+        }
+      }
+      docs.add(doc);
+      f.setValue(sb.toString());
+      w.addDocument(d);
+    }
+
+    IndexReader reader = w.getReader();
+    IndexSearcher s = newSearcher(reader);
+    w.close();
+
+    // now search
+    int num = atLeast(10);
+    for(int i=0;i<num;i++) {
+      int docID = r.nextInt(docs.size());
+      List<String> doc = docs.get(docID);
+      
+      final int numTerm = _TestUtil.nextInt(r, 2, 20);
+      final int start = r.nextInt(doc.size()-numTerm);
+      PhraseQuery pq = new PhraseQuery();
+      StringBuilder sb = new StringBuilder();
+      for(int t=start;t<start+numTerm;t++) {
+        pq.add(new Term("f", doc.get(t)));
+        sb.append(doc.get(t)).append(' ');
+      }
+
+      TopDocs hits = s.search(pq, NUM_DOCS);
+      boolean found = false;
+      for(int j=0;j<hits.scoreDocs.length;j++) {
+        if (hits.scoreDocs[j].doc == docID) {
+          found = true;
+          break;
+        }
+      }
+
+      assertTrue("phrase '" + sb + "' not found; start=" + start, found);
+    }
+
+    reader.close();
+    s.close();
+    dir.close();
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/TestPositionIncrement.java b/lucene/backwards/src/test/org/apache/lucene/search/TestPositionIncrement.java
new file mode 100644
index 0000000..6d8d2e1
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/TestPositionIncrement.java
@@ -0,0 +1,374 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.io.Reader;
+import java.io.StringReader;
+import java.util.Collection;
+import java.util.Collections;
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.StopFilter;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.WhitespaceAnalyzer;
+import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
+import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
+import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.analysis.CharArraySet;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.TermPositions;
+import org.apache.lucene.queryParser.QueryParser;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.analysis.LowerCaseTokenizer;
+import org.apache.lucene.analysis.TokenFilter;
+import org.apache.lucene.index.Payload;
+import org.apache.lucene.search.payloads.PayloadSpanUtil;
+import org.apache.lucene.search.spans.SpanNearQuery;
+import org.apache.lucene.search.spans.SpanQuery;
+import org.apache.lucene.search.spans.SpanTermQuery;
+import org.apache.lucene.search.spans.Spans;
+import org.apache.lucene.util.Version;
+import org.apache.lucene.util.LuceneTestCase;
+
+/**
+ * Term position unit test.
+ *
+ *
+ * @version $Revision$
+ */
+public class TestPositionIncrement extends LuceneTestCase {
+
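+  /** Indexes a token stream with custom position increments (including 0 and 2) and checks that
+   *  PhraseQuery and MultiPhraseQuery only match when the queried positions line up with the indexed ones. */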
+  public void testSetPosition() throws Exception {
+    Analyzer analyzer = new Analyzer() {
+      @Override
+      public TokenStream tokenStream(String fieldName, Reader reader) {
+        return new TokenStream() {
+          private final String[] TOKENS = {"1", "2", "3", "4", "5"};
+          private final int[] INCREMENTS = {0, 2, 1, 0, 1};
+          private int i = 0;
+
+          PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class);
+          CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
+          OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
+          
+          @Override
+          public boolean incrementToken() {
+            if (i == TOKENS.length)
+              return false;
+            clearAttributes();
+            termAtt.append(TOKENS[i]);
+            offsetAtt.setOffset(i,i);
+            posIncrAtt.setPositionIncrement(INCREMENTS[i]);
+            i++;
+            return true;
+          }
+
+          @Override
+          public void reset() throws IOException {
+            super.reset();
+            this.i = 0;
+          }
+        };
+      }
+    };
+    Directory store = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random, store, analyzer);
+    Document d = new Document();
+    d.add(newField("field", "bogus", Field.Store.YES, Field.Index.ANALYZED));
+    writer.addDocument(d);
+    IndexReader reader = writer.getReader();
+    writer.close();
+    
+
+    IndexSearcher searcher = newSearcher(reader);
+    
+    TermPositions pos = searcher.getIndexReader().termPositions(new Term("field", "1"));
+    pos.next();
+    // first token should be at position 0
+    assertEquals(0, pos.nextPosition());
+    
+    pos = searcher.getIndexReader().termPositions(new Term("field", "2"));
+    pos.next();
+    // second token should be at position 2
+    assertEquals(2, pos.nextPosition());
+    
+    PhraseQuery q;
+    ScoreDoc[] hits;
+
+    q = new PhraseQuery();
+    q.add(new Term("field", "1"));
+    q.add(new Term("field", "2"));
+    hits = searcher.search(q, null, 1000).scoreDocs;
+    assertEquals(0, hits.length);
+
+    // same as previous, just specify positions explicitly.
+    q = new PhraseQuery(); 
+    q.add(new Term("field", "1"),0);
+    q.add(new Term("field", "2"),1);
+    hits = searcher.search(q, null, 1000).scoreDocs;
+    assertEquals(0, hits.length);
+
+    // specifying correct positions should find the phrase.
+    q = new PhraseQuery();
+    q.add(new Term("field", "1"),0);
+    q.add(new Term("field", "2"),2);
+    hits = searcher.search(q, null, 1000).scoreDocs;
+    assertEquals(1, hits.length);
+
+    q = new PhraseQuery();
+    q.add(new Term("field", "2"));
+    q.add(new Term("field", "3"));
+    hits = searcher.search(q, null, 1000).scoreDocs;
+    assertEquals(1, hits.length);
+
+    q = new PhraseQuery();
+    q.add(new Term("field", "3"));
+    q.add(new Term("field", "4"));
+    hits = searcher.search(q, null, 1000).scoreDocs;
+    assertEquals(0, hits.length);
+
+    // phrase query would find it when correct positions are specified. 
+    q = new PhraseQuery();
+    q.add(new Term("field", "3"),0);
+    q.add(new Term("field", "4"),0);
+    hits = searcher.search(q, null, 1000).scoreDocs;
+    assertEquals(1, hits.length);
+
+    // phrase query should fail for a non-existing searched term
+    // even if other searched terms exist in the same searched position.
+    q = new PhraseQuery();
+    q.add(new Term("field", "3"),0);
+    q.add(new Term("field", "9"),0);
+    hits = searcher.search(q, null, 1000).scoreDocs;
+    assertEquals(0, hits.length);
+
+    // multi-phrase query should succeed for a non-existing term
+    // because other searched terms exist at the same position.
+    MultiPhraseQuery mq = new MultiPhraseQuery();
+    mq.add(new Term[]{new Term("field", "3"),new Term("field", "9")},0);
+    hits = searcher.search(mq, null, 1000).scoreDocs;
+    assertEquals(1, hits.length);
+
+    q = new PhraseQuery();
+    q.add(new Term("field", "2"));
+    q.add(new Term("field", "4"));
+    hits = searcher.search(q, null, 1000).scoreDocs;
+    assertEquals(1, hits.length);
+
+    q = new PhraseQuery();
+    q.add(new Term("field", "3"));
+    q.add(new Term("field", "5"));
+    hits = searcher.search(q, null, 1000).scoreDocs;
+    assertEquals(1, hits.length);
+
+    q = new PhraseQuery();
+    q.add(new Term("field", "4"));
+    q.add(new Term("field", "5"));
+    hits = searcher.search(q, null, 1000).scoreDocs;
+    assertEquals(1, hits.length);
+
+    q = new PhraseQuery();
+    q.add(new Term("field", "2"));
+    q.add(new Term("field", "5"));
+    hits = searcher.search(q, null, 1000).scoreDocs;
+    assertEquals(0, hits.length);
+
+    // should not find "1 2" because there is a gap of 1 in the index
+    QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field",
+                                     new StopWhitespaceAnalyzer(false));
+    q = (PhraseQuery) qp.parse("\"1 2\"");
+    hits = searcher.search(q, null, 1000).scoreDocs;
+    assertEquals(0, hits.length);
+
+    // omitted stop word cannot help because stop filter swallows the increments. 
+    q = (PhraseQuery) qp.parse("\"1 stop 2\"");
+    hits = searcher.search(q, null, 1000).scoreDocs;
+    assertEquals(0, hits.length);
+
+    // query parser alone won't help, because stop filter swallows the increments. 
+    qp.setEnablePositionIncrements(true);
+    q = (PhraseQuery) qp.parse("\"1 stop 2\"");
+    hits = searcher.search(q, null, 1000).scoreDocs;
+    assertEquals(0, hits.length);
+
+    // stop filter alone won't help, because query parser swallows the increments. 
+    qp.setEnablePositionIncrements(false);
+    q = (PhraseQuery) qp.parse("\"1 stop 2\"");
+    hits = searcher.search(q, null, 1000).scoreDocs;
+    assertEquals(0, hits.length);
+      
+    // when both qp and stopFilter propagate increments, we should find the doc.
+    qp = new QueryParser(TEST_VERSION_CURRENT, "field",
+                         new StopWhitespaceAnalyzer(true));
+    qp.setEnablePositionIncrements(true);
+    q = (PhraseQuery) qp.parse("\"1 stop 2\"");
+    hits = searcher.search(q, null, 1000).scoreDocs;
+    assertEquals(1, hits.length);
+    
+    searcher.close();
+    reader.close();
+    store.close();
+  }
+
+  private static class StopWhitespaceAnalyzer extends Analyzer {
+    boolean enablePositionIncrements;
+    final WhitespaceAnalyzer a = new WhitespaceAnalyzer(TEST_VERSION_CURRENT);
+    public StopWhitespaceAnalyzer(boolean enablePositionIncrements) {
+      this.enablePositionIncrements = enablePositionIncrements;
+    }
+    @Override
+    public TokenStream tokenStream(String fieldName, Reader reader) {
+      TokenStream ts = a.tokenStream(fieldName,reader);
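+      // with a pre-2.9 Version (LUCENE_24) StopFilter does not preserve position increments
+      // for removed stop words; with TEST_VERSION_CURRENT it does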
+      return new StopFilter(enablePositionIncrements?TEST_VERSION_CURRENT:Version.LUCENE_24, ts,
+          new CharArraySet(TEST_VERSION_CURRENT, Collections.singleton("stop"), true));
+    }
+  }
+  
+  public void testPayloadsPos0() throws Exception {
+    Directory dir = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random, dir, new TestPayloadAnalyzer());
+    Document doc = new Document();
+    doc.add(new Field("content",
+                      new StringReader("a a b c d e a f g h i j a b k k")));
+    writer.addDocument(doc);
+
+    IndexReader r = writer.getReader();
+
+    TermPositions tp = r.termPositions(new Term("content", "a"));
+    int count = 0;
+    assertTrue(tp.next());
+    // "a" occurs 4 times
+    assertEquals(4, tp.freq());
+    int expected = 0;
+    assertEquals(expected, tp.nextPosition());
+    assertEquals(1, tp.nextPosition());
+    assertEquals(3, tp.nextPosition());
+    assertEquals(6, tp.nextPosition());
+
+    // only one doc has "a"
+    assertFalse(tp.next());
+
+    IndexSearcher is = newSearcher(r);
+  
+    SpanTermQuery stq1 = new SpanTermQuery(new Term("content", "a"));
+    SpanTermQuery stq2 = new SpanTermQuery(new Term("content", "k"));
+    SpanQuery[] sqs = { stq1, stq2 };
+    SpanNearQuery snq = new SpanNearQuery(sqs, 30, false);
+
+    count = 0;
+    boolean sawZero = false;
+    //System.out.println("\ngetPayloadSpans test");
+    Spans pspans = snq.getSpans(is.getIndexReader());
+    while (pspans.next()) {
+      //System.out.println(pspans.doc() + " - " + pspans.start() + " - "+ pspans.end());
+      Collection<byte[]> payloads = pspans.getPayload();
+      sawZero |= pspans.start() == 0;
+      count += payloads.size();
+    }
+    assertEquals(5, count);
+    assertTrue(sawZero);
+
+    //System.out.println("\ngetSpans test");
+    Spans spans = snq.getSpans(is.getIndexReader());
+    count = 0;
+    sawZero = false;
+    while (spans.next()) {
+      count++;
+      sawZero |= spans.start() == 0;
+      //System.out.println(spans.doc() + " - " + spans.start() + " - " + spans.end());
+    }
+    assertEquals(4, count);
+    assertTrue(sawZero);
+  
+    //System.out.println("\nPayloadSpanUtil test");
+
+    sawZero = false;
+    PayloadSpanUtil psu = new PayloadSpanUtil(is.getIndexReader());
+    Collection<byte[]> pls = psu.getPayloadsForQuery(snq);
+    count = pls.size();
+    for (byte[] bytes : pls) {
+      String s = new String(bytes);
+      //System.out.println(s);
+      sawZero |= s.equals("pos: 0");
+    }
+    assertEquals(5, count);
+    assertTrue(sawZero);
+    writer.close();
+    is.getIndexReader().close();
+    dir.close();
+  }
+}
+
+final class TestPayloadAnalyzer extends Analyzer {
+
+  @Override
+  public TokenStream tokenStream(String fieldName, Reader reader) {
+    TokenStream result = new LowerCaseTokenizer(LuceneTestCase.TEST_VERSION_CURRENT, reader);
+    return new PayloadFilter(result, fieldName);
+  }
+}
+
+final class PayloadFilter extends TokenFilter {
+  String fieldName;
+
+  int pos;
+
+  int i;
+
+  final PositionIncrementAttribute posIncrAttr;
+  final PayloadAttribute payloadAttr;
+  final CharTermAttribute termAttr;
+
+  public PayloadFilter(TokenStream input, String fieldName) {
+    super(input);
+    this.fieldName = fieldName;
+    pos = 0;
+    i = 0;
+    posIncrAttr = input.addAttribute(PositionIncrementAttribute.class);
+    payloadAttr = input.addAttribute(PayloadAttribute.class);
+    termAttr = input.addAttribute(CharTermAttribute.class);
+  }
+
+  @Override
+  public boolean incrementToken() throws IOException {
+    if (input.incrementToken()) {
+      payloadAttr.setPayload(new Payload(("pos: " + pos).getBytes()));
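+      // even-indexed tokens get increment 0 (stacking on the current position), odd-indexed tokens advance by one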
+      int posIncr;
+      if (i % 2 == 1) {
+        posIncr = 1;
+      } else {
+        posIncr = 0;
+      }
+      posIncrAttr.setPositionIncrement(posIncr);
+      pos += posIncr;
+      if (TestPositionIncrement.VERBOSE) {
+        System.out.println("term=" + termAttr + " pos=" + pos);
+      }
+      i++;
+      return true;
+    } else {
+      return false;
+    }
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/TestPositiveScoresOnlyCollector.java b/lucene/backwards/src/test/org/apache/lucene/search/TestPositiveScoresOnlyCollector.java
new file mode 100644
index 0000000..4ef962c
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/TestPositiveScoresOnlyCollector.java
@@ -0,0 +1,97 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+
+public class TestPositiveScoresOnlyCollector extends LuceneTestCase {
+
+  private static final class SimpleScorer extends Scorer {
+    private int idx = -1;
+    
+    public SimpleScorer(Weight weight) {
+      super(weight);
+    }
+    
+    @Override public float score() throws IOException {
+      return idx == scores.length ? Float.NaN : scores[idx];
+    }
+
+    @Override public int docID() { return idx; }
+
+    @Override public int nextDoc() throws IOException {
+      return ++idx != scores.length ? idx : NO_MORE_DOCS;
+    }
+    
+    @Override public int advance(int target) throws IOException {
+      idx = target;
+      return idx < scores.length ? idx : NO_MORE_DOCS;
+    }
+  }
+
+  // The scores must have positive as well as negative values
+  private static final float[] scores = new float[] { 0.7767749f, -1.7839992f,
+      8.9925785f, 7.9608946f, -0.07948637f, 2.6356435f, 7.4950366f, 7.1490803f,
+      -8.108544f, 4.961808f, 2.2423935f, -7.285586f, 4.6699767f };
+
+  public void testNegativeScores() throws Exception {
+  
+    // The Top*Collectors previously filtered out documents with scores <= 0. This
+    // behavior has changed. This test checks that if PositiveScoresOnlyCollector
+    // wraps one of these collectors, documents with scores <= 0 are indeed
+    // filtered out.
+    
+    int numPositiveScores = 0;
+    for (int i = 0; i < scores.length; i++) {
+      if (scores[i] > 0) {
+        ++numPositiveScores;
+      }
+    }
+    
+    Directory directory = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random, directory);
+    writer.commit();
+    IndexReader ir = writer.getReader();
+    writer.close();
+    IndexSearcher searcher = newSearcher(ir);
+    Weight fake = new TermQuery(new Term("fake", "weight")).createWeight(searcher);
+    Scorer s = new SimpleScorer(fake);
+    TopDocsCollector<ScoreDoc> tdc = TopScoreDocCollector.create(scores.length, true);
+    Collector c = new PositiveScoresOnlyCollector(tdc);
+    c.setScorer(s);
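+    // drive the scorer manually; the wrapping collector should pass through only docs whose score is > 0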
+    while (s.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
+      c.collect(0);
+    }
+    TopDocs td = tdc.topDocs();
+    ScoreDoc[] sd = td.scoreDocs;
+    assertEquals(numPositiveScores, td.totalHits);
+    for (int i = 0; i < sd.length; i++) {
+      assertTrue("only positive scores should return: " + sd[i].score, sd[i].score > 0);
+    }
+    searcher.close();
+    ir.close();
+    directory.close();
+  }
+  
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/TestPrefixFilter.java b/lucene/backwards/src/test/org/apache/lucene/search/TestPrefixFilter.java
new file mode 100644
index 0000000..65a197f
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/TestPrefixFilter.java
@@ -0,0 +1,109 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+
+/**
+ * Tests {@link PrefixFilter} class.
+ *
+ */
+public class TestPrefixFilter extends LuceneTestCase {
+  public void testPrefixFilter() throws Exception {
+    Directory directory = newDirectory();
+
+    String[] categories = new String[] {"/Computers/Linux",
+                                        "/Computers/Mac/One",
+                                        "/Computers/Mac/Two",
+                                        "/Computers/Windows"};
+    RandomIndexWriter writer = new RandomIndexWriter(random, directory);
+    for (int i = 0; i < categories.length; i++) {
+      Document doc = new Document();
+      doc.add(newField("category", categories[i], Field.Store.YES, Field.Index.NOT_ANALYZED));
+      writer.addDocument(doc);
+    }
+    IndexReader reader = writer.getReader();
+
+    // PrefixFilter combined with ConstantScoreQuery
+    PrefixFilter filter = new PrefixFilter(new Term("category", "/Computers"));
+    Query query = new ConstantScoreQuery(filter);
+    IndexSearcher searcher = newSearcher(reader);
+    ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals(4, hits.length);
+
+    // test middle of values
+    filter = new PrefixFilter(new Term("category", "/Computers/Mac"));
+    query = new ConstantScoreQuery(filter);
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals(2, hits.length);
+
+    // test start of values
+    filter = new PrefixFilter(new Term("category", "/Computers/Linux"));
+    query = new ConstantScoreQuery(filter);
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals(1, hits.length);
+
+    // test end of values
+    filter = new PrefixFilter(new Term("category", "/Computers/Windows"));
+    query = new ConstantScoreQuery(filter);
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals(1, hits.length);
+
+    // test non-existent
+    filter = new PrefixFilter(new Term("category", "/Computers/ObsoleteOS"));
+    query = new ConstantScoreQuery(filter);
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals(0, hits.length);
+
+    // test non-existent, before values
+    filter = new PrefixFilter(new Term("category", "/Computers/AAA"));
+    query = new ConstantScoreQuery(filter);
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals(0, hits.length);
+
+    // test non-existent, after values
+    filter = new PrefixFilter(new Term("category", "/Computers/ZZZ"));
+    query = new ConstantScoreQuery(filter);
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals(0, hits.length);
+
+    // test zero length prefix
+    filter = new PrefixFilter(new Term("category", ""));
+    query = new ConstantScoreQuery(filter);
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals(4, hits.length);
+
+    // test non-existent field
+    filter = new PrefixFilter(new Term("nonexistantfield", "/Computers"));
+    query = new ConstantScoreQuery(filter);
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals(0, hits.length);
+    
+    writer.close();
+    searcher.close();
+    reader.close();
+    directory.close();
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/TestPrefixInBooleanQuery.java b/lucene/backwards/src/test/org/apache/lucene/search/TestPrefixInBooleanQuery.java
new file mode 100644
index 0000000..619b676
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/TestPrefixInBooleanQuery.java
@@ -0,0 +1,117 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.util.LuceneTestCase;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.store.Directory;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+
+/**
+ * https://issues.apache.org/jira/browse/LUCENE-1974
+ *
+ * Reproduces a bug in
+ *
+ *    BooleanScorer.score(Collector collector, int max, int firstDocID)
+ *
+ * where, at line 273 with end=8192 and subScorerDocID=11378, "more" incorrectly became false.
+ */
+public class TestPrefixInBooleanQuery extends LuceneTestCase {
+
+  private static final String FIELD = "name";
+  private static Directory directory;
+  private static IndexReader reader;
+  private static IndexSearcher searcher;
+
+  @BeforeClass
+  public static void beforeClass() throws Exception {
+    directory = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random, directory);
+
+    Document doc = new Document();
+    Field field = newField(FIELD, "meaninglessnames", Field.Store.NO,
+        Field.Index.NOT_ANALYZED_NO_NORMS);
+    doc.add(field);
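+    // the two "tangfulin" docs are separated by thousands of filler docs, presumably so that
+    // matching spans BooleanScorer's internal scoring blocks (see the class javadoc / LUCENE-1974)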
+    
+    for (int i = 0; i < 5137; ++i) {
+      writer.addDocument(doc);
+    }
+    
+    field.setValue("tangfulin");
+    writer.addDocument(doc);
+
+    field.setValue("meaninglessnames");
+    for (int i = 5138; i < 11377; ++i) {
+      writer.addDocument(doc);
+    }
+    
+    field.setValue("tangfulin");
+    writer.addDocument(doc);
+    
+    reader = writer.getReader();
+    searcher = newSearcher(reader);
+    writer.close();
+  }
+  
+  @AfterClass
+  public static void afterClass() throws Exception {
+    searcher.close();
+    searcher = null;
+    reader.close();
+    reader = null;
+    directory.close();
+    directory = null;
+  }
+  
+  public void testPrefixQuery() throws Exception {
+    Query query = new PrefixQuery(new Term(FIELD, "tang"));
+    assertEquals("Number of matched documents", 2,
+                 searcher.search(query, null, 1000).totalHits);
+  }
+  public void testTermQuery() throws Exception {
+    Query query = new TermQuery(new Term(FIELD, "tangfulin"));
+    assertEquals("Number of matched documents", 2,
+                 searcher.search(query, null, 1000).totalHits);
+  }
+  public void testTermBooleanQuery() throws Exception {
+    BooleanQuery query = new BooleanQuery();
+    query.add(new TermQuery(new Term(FIELD, "tangfulin")),
+              BooleanClause.Occur.SHOULD);
+    query.add(new TermQuery(new Term(FIELD, "notexistnames")),
+              BooleanClause.Occur.SHOULD);
+    assertEquals("Number of matched documents", 2,
+                 searcher.search(query, null, 1000).totalHits);
+
+  }
+  public void testPrefixBooleanQuery() throws Exception {
+    BooleanQuery query = new BooleanQuery();
+    query.add(new PrefixQuery(new Term(FIELD, "tang")),
+              BooleanClause.Occur.SHOULD);
+    query.add(new TermQuery(new Term(FIELD, "notexistnames")),
+              BooleanClause.Occur.SHOULD);
+    assertEquals("Number of matched documents", 2,
+                 searcher.search(query, null, 1000).totalHits);
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/TestPrefixQuery.java b/lucene/backwards/src/test/org/apache/lucene/search/TestPrefixQuery.java
new file mode 100644
index 0000000..6533815
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/TestPrefixQuery.java
@@ -0,0 +1,60 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+
+/**
+ * Tests {@link PrefixQuery} class.
+ *
+ */
+public class TestPrefixQuery extends LuceneTestCase {
+  public void testPrefixQuery() throws Exception {
+    Directory directory = newDirectory();
+
+    String[] categories = new String[] {"/Computers",
+                                        "/Computers/Mac",
+                                        "/Computers/Windows"};
+    RandomIndexWriter writer = new RandomIndexWriter(random, directory);
+    for (int i = 0; i < categories.length; i++) {
+      Document doc = new Document();
+      doc.add(newField("category", categories[i], Field.Store.YES, Field.Index.NOT_ANALYZED));
+      writer.addDocument(doc);
+    }
+    IndexReader reader = writer.getReader();
+
+    PrefixQuery query = new PrefixQuery(new Term("category", "/Computers"));
+    IndexSearcher searcher = newSearcher(reader);
+    ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals("All documents in /Computers category and below", 3, hits.length);
+
+    query = new PrefixQuery(new Term("category", "/Computers/Mac"));
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals("One in /Computers/Mac", 1, hits.length);
+    writer.close();
+    searcher.close();
+    reader.close();
+    directory.close();
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/TestPrefixRandom.java b/lucene/backwards/src/test/org/apache/lucene/search/TestPrefixRandom.java
new file mode 100644
index 0000000..60bb06d
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/TestPrefixRandom.java
@@ -0,0 +1,147 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.MockTokenizer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util._TestUtil;
+
+/**
+ * Creates an index with random unicode terms, generates random prefix queries,
+ * and validates the results against a simple implementation.
+ */
+public class TestPrefixRandom extends LuceneTestCase {
+  private IndexSearcher searcher;
+  private IndexReader reader;
+  private Directory dir;
+  
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    dir = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random, dir, 
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.KEYWORD, false))
+        .setMaxBufferedDocs(_TestUtil.nextInt(random, 50, 1000)));
+    
+    Document doc = new Document();
+    Field bogus1 = newField("bogus", "", Field.Store.NO, Field.Index.NOT_ANALYZED_NO_NORMS);
+    Field field = newField("field", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
+    Field bogus2 = newField("zbogus", "", Field.Store.NO, Field.Index.NOT_ANALYZED_NO_NORMS);
+    doc.add(field);
+    doc.add(bogus1);
+    doc.add(bogus2);
+    
+    int num = atLeast(1000);
+
+    for (int i = 0; i < num; i++) {
+      field.setValue(_TestUtil.randomUnicodeString(random, 10));
+      bogus1.setValue(_TestUtil.randomUnicodeString(random, 10));
+      bogus2.setValue(_TestUtil.randomUnicodeString(random, 10));
+      writer.addDocument(doc);
+    }
+    reader = writer.getReader();
+    searcher = newSearcher(reader);
+    writer.close();
+  }
+
+  @Override
+  public void tearDown() throws Exception {
+    reader.close();
+    searcher.close();
+    dir.close();
+    super.tearDown();
+  }
+  
+  /** a simple, brute-force prefix query that just scans through all the terms */
+  private class DumbPrefixQuery extends MultiTermQuery {
+    private final Term prefix;
+    
+    DumbPrefixQuery(Term term) {
+      super();
+      prefix = term;
+    }
+    
+    @Override
+    protected FilteredTermEnum getEnum(IndexReader reader) throws IOException {
+      return new SimplePrefixTermEnum(reader, prefix);
+    }
+
+    private class SimplePrefixTermEnum extends FilteredTermEnum {
+      private final Term prefix;
+      private boolean endEnum;
+
+      private SimplePrefixTermEnum(IndexReader reader, Term prefix) throws IOException {
+        this.prefix = prefix;
+        setEnum(reader.terms(new Term(prefix.field(), "")));
+      }
+
+      @Override
+      protected boolean termCompare(Term term) {
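+        // field names are interned in Lucene 3.x, so reference comparison of the field is intentional here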
+        if (term.field() == prefix.field()) {
+          return term.text().startsWith(prefix.text());
+        } else {
+          endEnum = true;
+          return false;
+        }
+      }
+
+      @Override
+      public float difference() {
+        return 1.0F;
+      }
+
+      @Override
+      protected boolean endEnum() {
+        return endEnum;
+      }
+    }
+
+    @Override
+    public String toString(String field) {
+      return field.toString() + ":" + prefix.toString();
+    }
+  }
+  
+  /** test a bunch of random prefixes */
+  public void testPrefixes() throws Exception {
+      int num = atLeast(100);
+      for (int i = 0; i < num; i++)
+        assertSame(_TestUtil.randomUnicodeString(random, 5));
+  }
+  
+  /** check that the number of hits is the same as from a very
+   * simple PrefixQuery implementation.
+   */
+  private void assertSame(String prefix) throws IOException {   
+    PrefixQuery smart = new PrefixQuery(new Term("field", prefix));
+    DumbPrefixQuery dumb = new DumbPrefixQuery(new Term("field", prefix));
+    
+    TopDocs smartDocs = searcher.search(smart, 25);
+    TopDocs dumbDocs = searcher.search(dumb, 25);
+    CheckHits.checkEqual(smart, smartDocs.scoreDocs, dumbDocs.scoreDocs);
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/TestQueryTermVector.java b/lucene/backwards/src/test/org/apache/lucene/search/TestQueryTermVector.java
new file mode 100644
index 0000000..0c0cf85
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/TestQueryTermVector.java
@@ -0,0 +1,53 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.analysis.MockAnalyzer;
+
+public class TestQueryTermVector extends LuceneTestCase {
+
+  public void testConstructor() {
+    String [] queryTerm = {"foo", "bar", "foo", "again", "foo", "bar", "go", "go", "go"};
+    //Items are sorted lexicographically
+    String [] gold = {"again", "bar", "foo", "go"};
+    int [] goldFreqs = {1, 2, 3, 3};
+    QueryTermVector result = new QueryTermVector(queryTerm);
+    String [] terms = result.getTerms();
+    assertTrue(terms.length == 4);
+    int [] freq = result.getTermFrequencies();
+    assertTrue(freq.length == 4);
+    checkGold(terms, gold, freq, goldFreqs);
+    result = new QueryTermVector(null);
+    assertTrue(result.getTerms().length == 0);
+    
+    result = new QueryTermVector("foo bar foo again foo bar go go go", new MockAnalyzer(random));
+    terms = result.getTerms();
+    assertTrue(terms.length == 4);
+    freq = result.getTermFrequencies();
+    assertTrue(freq.length == 4);
+    checkGold(terms, gold, freq, goldFreqs);
+  }
+
+  private void checkGold(String[] terms, String[] gold, int[] freq, int[] goldFreqs) {
+    for (int i = 0; i < terms.length; i++) {
+      assertTrue(terms[i].equals(gold[i]));
+      assertTrue(freq[i] == goldFreqs[i]);
+    }
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/TestQueryWrapperFilter.java b/lucene/backwards/src/test/org/apache/lucene/search/TestQueryWrapperFilter.java
new file mode 100644
index 0000000..37d522c
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/TestQueryWrapperFilter.java
@@ -0,0 +1,84 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field.Index;
+import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.BooleanClause.Occur;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+
+public class TestQueryWrapperFilter extends LuceneTestCase {
+
+  public void testBasic() throws Exception {
+    Directory dir = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random, dir);
+    Document doc = new Document();
+    doc.add(newField("field", "value", Store.NO, Index.ANALYZED));
+    writer.addDocument(doc);
+    IndexReader reader = writer.getReader();
+    writer.close();
+
+    TermQuery termQuery = new TermQuery(new Term("field", "value"));
+
+    // should not throw exception with primitive query
+    QueryWrapperFilter qwf = new QueryWrapperFilter(termQuery);
+
+    IndexSearcher searcher = newSearcher(reader);
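+    // search MatchAllDocsQuery with the filter, so the hit count reflects the filter alone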
+    TopDocs hits = searcher.search(new MatchAllDocsQuery(), qwf, 10);
+    assertEquals(1, hits.totalHits);
+    hits = searcher.search(new MatchAllDocsQuery(), new CachingWrapperFilter(qwf), 10);
+    assertEquals(1, hits.totalHits);
+
+    // should not throw exception with complex primitive query
+    BooleanQuery booleanQuery = new BooleanQuery();
+    booleanQuery.add(termQuery, Occur.MUST);
+    booleanQuery.add(new TermQuery(new Term("field", "missing")),
+        Occur.MUST_NOT);
+    qwf = new QueryWrapperFilter(booleanQuery);
+
+    hits = searcher.search(new MatchAllDocsQuery(), qwf, 10);
+    assertEquals(1, hits.totalHits);
+    hits = searcher.search(new MatchAllDocsQuery(), new CachingWrapperFilter(qwf), 10);
+    assertEquals(1, hits.totalHits);
+
+    // should not throw exception with non primitive Query (doesn't implement
+    // Query#createWeight)
+    qwf = new QueryWrapperFilter(new FuzzyQuery(new Term("field", "valu")));
+
+    hits = searcher.search(new MatchAllDocsQuery(), qwf, 10);
+    assertEquals(1, hits.totalHits);
+    hits = searcher.search(new MatchAllDocsQuery(), new CachingWrapperFilter(qwf), 10);
+    assertEquals(1, hits.totalHits);
+
+    // test a query with no hits
+    termQuery = new TermQuery(new Term("field", "not_exist"));
+    qwf = new QueryWrapperFilter(termQuery);
+    hits = searcher.search(new MatchAllDocsQuery(), qwf, 10);
+    assertEquals(0, hits.totalHits);
+    hits = searcher.search(new MatchAllDocsQuery(), new CachingWrapperFilter(qwf), 10);
+    assertEquals(0, hits.totalHits);
+    searcher.close();
+    reader.close();
+    dir.close();
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/TestScoreCachingWrappingScorer.java b/lucene/backwards/src/test/org/apache/lucene/search/TestScoreCachingWrappingScorer.java
new file mode 100644
index 0000000..315b067
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/TestScoreCachingWrappingScorer.java
@@ -0,0 +1,115 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.util.LuceneTestCase;
+
+public class TestScoreCachingWrappingScorer extends LuceneTestCase {
+
+  private static final class SimpleScorer extends Scorer {
+    private int idx = 0;
+    private int doc = -1;
+    
+    public SimpleScorer() {
+      super((Similarity)null);
+    }
+    
+    @Override public float score() throws IOException {
+      // advance idx on purpose, so that consecutive calls to score will get
+      // different results. This is to emulate computation of a score. If
+      // ScoreCachingWrappingScorer is used, this should not be called more than
+      // once per document.
+      return idx == scores.length ? Float.NaN : scores[idx++];
+    }
+
+    @Override public int docID() { return doc; }
+
+    @Override public int nextDoc() throws IOException {
+      return ++doc < scores.length ? doc : NO_MORE_DOCS;
+    }
+    
+    @Override public int advance(int target) throws IOException {
+      doc = target;
+      return doc < scores.length ? doc : NO_MORE_DOCS;
+    }
+    
+  }
+  
+  private static final class ScoreCachingCollector extends Collector {
+
+    private int idx = 0;
+    private Scorer scorer;
+    float[] mscores;
+    
+    public ScoreCachingCollector(int numToCollect) {
+      mscores = new float[numToCollect];
+    }
+    
+    @Override public void collect(int doc) throws IOException {
+      // just a sanity check to avoid IOOB.
+      if (idx == mscores.length) {
+        return; 
+      }
+      
+      // just call score() a couple of times and record the score.
+      mscores[idx] = scorer.score();
+      mscores[idx] = scorer.score();
+      mscores[idx] = scorer.score();
+      ++idx;
+    }
+
+    @Override public void setNextReader(IndexReader reader, int docBase)
+        throws IOException {
+    }
+
+    @Override public void setScorer(Scorer scorer) throws IOException {
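+      // wrap the scorer so that repeated score() calls for the same doc return the cached value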
+      this.scorer = new ScoreCachingWrappingScorer(scorer);
+    }
+    
+    @Override public boolean acceptsDocsOutOfOrder() {
+      return true;
+    }
+
+  }
+
+  private static final float[] scores = new float[] { 0.7767749f, 1.7839992f,
+      8.9925785f, 7.9608946f, 0.07948637f, 2.6356435f, 7.4950366f, 7.1490803f,
+      8.108544f, 4.961808f, 2.2423935f, 7.285586f, 4.6699767f };
+  
+  public void testGetScores() throws Exception {
+    
+    Scorer s = new SimpleScorer();
+    ScoreCachingCollector scc = new ScoreCachingCollector(scores.length);
+    scc.setScorer(s);
+    
+    // We need to iterate on the scorer so that its doc() advances.
+    int doc;
+    while ((doc = s.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
+      scc.collect(doc);
+    }
+    
+    for (int i = 0; i < scores.length; i++) {
+      assertEquals(scores[i], scc.mscores[i], 0f);
+    }
+    
+  }
+  
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/TestScorerPerf.java b/lucene/backwards/src/test/org/apache/lucene/search/TestScorerPerf.java
new file mode 100755
index 0000000..9f71a23
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/TestScorerPerf.java
@@ -0,0 +1,411 @@
+package org.apache.lucene.search;
+
+import org.apache.lucene.util.DocIdBitSet;
+import org.apache.lucene.util.LuceneTestCase;
+
+import java.util.BitSet;
+import java.io.IOException;
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.WhitespaceAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class TestScorerPerf extends LuceneTestCase {
+  boolean validate = true;  // set to false when doing performance testing
+
+  BitSet[] sets;
+  Term[] terms;
+  IndexSearcher s;
+  Directory d;
+
+  public void createDummySearcher() throws Exception {
+    // Create a dummy index with nothing in it.
+    // This could possibly fail if Lucene starts checking for docid ranges...
+    d = newDirectory();
+    IndexWriter iw = new IndexWriter(d, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    iw.addDocument(new Document());
+    iw.close();
+    s = new IndexSearcher(d, true);
+  }
+
+  public void createRandomTerms(int nDocs, int nTerms, double power, Directory dir) throws Exception {
+    int[] freq = new int[nTerms];
+    terms = new Term[nTerms];
+    for (int i=0; i<nTerms; i++) {
+      int f = (nTerms+1)-i;  // make first terms less frequent
+      freq[i] = (int)Math.ceil(Math.pow(f,power));
+      terms[i] = new Term("f",Character.toString((char)('A'+i)));
+    }
+
+    IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE));
+    for (int i=0; i<nDocs; i++) {
+      Document d = new Document();
+      for (int j=0; j<nTerms; j++) {
+        if (random.nextInt(freq[j]) == 0) {
+          d.add(newField("f", terms[j].text(), Field.Store.NO, Field.Index.NOT_ANALYZED));
+          //System.out.println(d);
+        }
+      }
+      iw.addDocument(d);
+    }
+    iw.optimize();
+    iw.close();
+  }
+
+
+  public BitSet randBitSet(int sz, int numBitsToSet) {
+    BitSet set = new BitSet(sz);
+    for (int i=0; i<numBitsToSet; i++) {
+      set.set(random.nextInt(sz));
+    }
+    return set;
+  }
+
+  public BitSet[] randBitSets(int numSets, int setSize) {
+    BitSet[] sets = new BitSet[numSets];
+    for (int i=0; i<sets.length; i++) {
+      sets[i] = randBitSet(setSize, random.nextInt(setSize));
+    }
+    return sets;
+  }
+
+  public static class CountingHitCollector extends Collector {
+    int count=0;
+    int sum=0;
+    protected int docBase = 0;
+
+    @Override
+    public void setScorer(Scorer scorer) throws IOException {}
+    
+    @Override
+    public void collect(int doc) {
+      count++;
+      sum += docBase + doc;  // use it to avoid any possibility of being optimized away
+    }
+
+    public int getCount() { return count; }
+    public int getSum() { return sum; }
+
+    @Override
+    public void setNextReader(IndexReader reader, int base) {
+      docBase = base;
+    }
+    @Override
+    public boolean acceptsDocsOutOfOrder() {
+      return true;
+    }
+  }
+
+
+  public static class MatchingHitCollector extends CountingHitCollector {
+    BitSet answer;
+    int pos=-1;
+    public MatchingHitCollector(BitSet answer) {
+      this.answer = answer;
+    }
+
+    public void collect(int doc, float score) {
+      
+      pos = answer.nextSetBit(pos+1);
+      if (pos != doc + docBase) {
+        throw new RuntimeException("Expected doc " + pos + " but got " + (doc + docBase));
+      }
+      super.collect(doc);
+    }
+  }
+
+
+  BitSet addClause(BooleanQuery bq, BitSet result) {
+    final BitSet rnd = sets[random.nextInt(sets.length)];
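+    // expose a pre-computed random BitSet as a Filter, so the expected conjunction result can be computed with BitSet.and()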
+    Query q = new ConstantScoreQuery(new Filter() {
+      @Override
+      public DocIdSet getDocIdSet(IndexReader reader) {
+        return new DocIdBitSet(rnd);
+      }
+    });
+    bq.add(q, BooleanClause.Occur.MUST);
+    if (validate) {
+      if (result==null) result = (BitSet)rnd.clone();
+      else result.and(rnd);
+    }
+    return result;
+  }
+
+
+  public int doConjunctions(int iter, int maxClauses) throws IOException {
+    int ret=0;
+
+    for (int i=0; i<iter; i++) {
+      int nClauses = random.nextInt(maxClauses-1)+2; // min 2 clauses
+      BooleanQuery bq = new BooleanQuery();
+      BitSet result=null;
+      for (int j=0; j<nClauses; j++) {
+        result = addClause(bq,result);
+      }
+
+      CountingHitCollector hc = validate ? new MatchingHitCollector(result)
+                                         : new CountingHitCollector();
+      s.search(bq, hc);
+      ret += hc.getSum();
+
+      if (validate) assertEquals(result.cardinality(), hc.getCount());
+      // System.out.println(hc.getCount());
+    }
+    
+    return ret;
+  }
+
+  public int doNestedConjunctions(int iter, int maxOuterClauses, int maxClauses) throws IOException {
+    int ret=0;
+    long nMatches=0;
+
+    for (int i=0; i<iter; i++) {
+      int oClauses = random.nextInt(maxOuterClauses-1)+2;
+      BooleanQuery oq = new BooleanQuery();
+      BitSet result=null;
+
+      for (int o=0; o<oClauses; o++) {
+
+      int nClauses = random.nextInt(maxClauses-1)+2; // min 2 clauses
+      BooleanQuery bq = new BooleanQuery();
+      for (int j=0; j<nClauses; j++) {
+        result = addClause(bq,result);
+      }
+
+      oq.add(bq, BooleanClause.Occur.MUST);
+      } // outer
+
+      CountingHitCollector hc = validate ? new MatchingHitCollector(result)
+                                         : new CountingHitCollector();
+      s.search(oq, hc);
+      nMatches += hc.getCount();
+      ret += hc.getSum();
+      if (validate) assertEquals(result.cardinality(), hc.getCount());
+      // System.out.println(hc.getCount());
+    }
+    if (VERBOSE) System.out.println("Average number of matches="+(nMatches/iter));
+    return ret;
+  }
+
+  
+  public int doTermConjunctions(IndexSearcher s,
+                                int termsInIndex,
+                                int maxClauses,
+                                int iter
+  ) throws IOException {
+    int ret=0;
+
+    long nMatches=0;
+    for (int i=0; i<iter; i++) {
+      int nClauses = random.nextInt(maxClauses-1)+2; // min 2 clauses
+      BooleanQuery bq = new BooleanQuery();
+      BitSet termflag = new BitSet(termsInIndex);
+      for (int j=0; j<nClauses; j++) {
+        int tnum;
+        // don't pick same clause twice
+        tnum = random.nextInt(termsInIndex);
+        if (termflag.get(tnum)) tnum=termflag.nextClearBit(tnum);
+        if (tnum<0 || tnum>=termsInIndex) tnum=termflag.nextClearBit(0);
+        termflag.set(tnum);
+        Query tq = new TermQuery(terms[tnum]);
+        bq.add(tq, BooleanClause.Occur.MUST);
+      }
+
+      CountingHitCollector hc = new CountingHitCollector();
+      s.search(bq, hc);
+      nMatches += hc.getCount();
+      ret += hc.getSum();
+    }
+    if (VERBOSE) System.out.println("Average number of matches="+(nMatches/iter));
+
+    return ret;
+  }
+
+
+  public int doNestedTermConjunctions(IndexSearcher s,
+                                int termsInIndex,
+                                int maxOuterClauses,
+                                int maxClauses,
+                                int iter
+  ) throws IOException {
+    int ret=0;
+    long nMatches=0;
+    for (int i=0; i<iter; i++) {
+      int oClauses = random.nextInt(maxOuterClauses-1)+2;
+      BooleanQuery oq = new BooleanQuery();
+      for (int o=0; o<oClauses; o++) {
+
+      int nClauses = random.nextInt(maxClauses-1)+2; // min 2 clauses
+      BooleanQuery bq = new BooleanQuery();
+      BitSet termflag = new BitSet(termsInIndex);
+      for (int j=0; j<nClauses; j++) {
+        int tnum;
+        // don't pick same clause twice
+        tnum = random.nextInt(termsInIndex);
+        if (termflag.get(tnum)) tnum=termflag.nextClearBit(tnum);
+        if (tnum<0 || tnum>=termsInIndex) tnum=termflag.nextClearBit(0);
+        termflag.set(tnum);
+        Query tq = new TermQuery(terms[tnum]);
+        bq.add(tq, BooleanClause.Occur.MUST);
+      } // inner
+
+      oq.add(bq, BooleanClause.Occur.MUST);
+      } // outer
+
+
+      CountingHitCollector hc = new CountingHitCollector();
+      s.search(oq, hc);
+      nMatches += hc.getCount();     
+      ret += hc.getSum();
+    }
+    if (VERBOSE) System.out.println("Average number of matches="+(nMatches/iter));
+    return ret;
+  }
+
+
+    public int doSloppyPhrase(IndexSearcher s,
+                                int termsInIndex,
+                                int maxClauses,
+                                int iter
+  ) throws IOException {
+    int ret=0;
+
+    for (int i=0; i<iter; i++) {
+      int nClauses = random.nextInt(maxClauses-1)+2; // min 2 clauses
+      PhraseQuery q = new PhraseQuery();
+      for (int j=0; j<nClauses; j++) {
+        int tnum = random.nextInt(termsInIndex);
+        q.add(new Term("f",Character.toString((char)(tnum+'A'))), j);
+      }
+      q.setSlop(termsInIndex);  // this could be random too
+
+      CountingHitCollector hc = new CountingHitCollector();
+      s.search(q, hc);
+      ret += hc.getSum();
+    }
+
+    return ret;
+  }
+
+
+  public void testConjunctions() throws Exception {
+    // test many small sets... the bugs will be found on boundary conditions
+    createDummySearcher();
+    validate=true;
+    sets=randBitSets(atLeast(1000), atLeast(10));
+    doConjunctions(atLeast(10000), atLeast(5));
+    doNestedConjunctions(atLeast(10000), atLeast(3), atLeast(3));
+    s.close();
+    d.close();
+  }
+
+  /***
+  int bigIter=10;
+
+  public void testConjunctionPerf() throws Exception {
+    r = newRandom();
+    createDummySearcher();
+    validate=false;
+    sets=randBitSets(32,1000000);
+    for (int i=0; i<bigIter; i++) {
+      long start = System.currentTimeMillis();
+      doConjunctions(500,6);
+      long end = System.currentTimeMillis();
+      if (VERBOSE) System.out.println("milliseconds="+(end-start));
+    }
+    s.close();
+  }
+
+  public void testNestedConjunctionPerf() throws Exception {
+    r = newRandom();
+    createDummySearcher();
+    validate=false;
+    sets=randBitSets(32,1000000);
+    for (int i=0; i<bigIter; i++) {
+      long start = System.currentTimeMillis();
+      doNestedConjunctions(500,3,3);
+      long end = System.currentTimeMillis();
+      if (VERBOSE) System.out.println("milliseconds="+(end-start));
+    }
+    s.close();
+  }
+
+
+  public void testConjunctionTerms() throws Exception {
+    r = newRandom();
+    validate=false;
+    RAMDirectory dir = new RAMDirectory();
+    if (VERBOSE) System.out.println("Creating index");
+    createRandomTerms(100000,25,.5, dir);
+    s = new IndexSearcher(dir, true);
+    if (VERBOSE) System.out.println("Starting performance test");
+    for (int i=0; i<bigIter; i++) {
+      long start = System.currentTimeMillis();
+      doTermConjunctions(s,25,5,1000);
+      long end = System.currentTimeMillis();
+      if (VERBOSE) System.out.println("milliseconds="+(end-start));
+    }
+    s.close();
+  }
+
+  public void testNestedConjunctionTerms() throws Exception {
+    r = newRandom();
+    validate=false;    
+    RAMDirectory dir = new RAMDirectory();
+    if (VERBOSE) System.out.println("Creating index");
+    createRandomTerms(100000,25,.2, dir);
+    s = new IndexSearcher(dir, true);
+    if (VERBOSE) System.out.println("Starting performance test");
+    for (int i=0; i<bigIter; i++) {
+      long start = System.currentTimeMillis();
+      doNestedTermConjunctions(s,25,3,3,200);
+      long end = System.currentTimeMillis();
+      if (VERBOSE) System.out.println("milliseconds="+(end-start));
+    }
+    s.close();
+  }
+
+
+  public void testSloppyPhrasePerf() throws Exception {
+    r = newRandom();
+    validate=false;    
+    RAMDirectory dir = new RAMDirectory();
+    if (VERBOSE) System.out.println("Creating index");
+    createRandomTerms(100000,25,2,dir);
+    s = new IndexSearcher(dir, true);
+    if (VERBOSE) System.out.println("Starting performance test");
+    for (int i=0; i<bigIter; i++) {
+      long start = System.currentTimeMillis();
+      doSloppyPhrase(s,25,2,1000);
+      long end = System.currentTimeMillis();
+      if (VERBOSE) System.out.println("milliseconds="+(end-start));
+    }
+    s.close();
+  }
+   ***/
+
+
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/TestSearchWithThreads.java b/lucene/backwards/src/test/org/apache/lucene/search/TestSearchWithThreads.java
new file mode 100644
index 0000000..d47bbc8
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/TestSearchWithThreads.java
@@ -0,0 +1,113 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicLong;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+
+public class TestSearchWithThreads extends LuceneTestCase {
+  
+  final int NUM_DOCS = atLeast(10000);
+  final int NUM_SEARCH_THREADS = 5;
+  final int RUN_TIME_MSEC = atLeast(1000);
+
+  public void test() throws Exception {
+    final Directory dir = newDirectory();
+    final RandomIndexWriter w = new RandomIndexWriter(random, dir);
+
+    final long startTime = System.currentTimeMillis();
+
+    // TODO: replace w/ the @nightly test data; make this
+    // into an optional @nightly stress test
+    final Document doc = new Document();
+    final Field body = newField("body", "", Field.Index.ANALYZED);
+    doc.add(body);
+    final StringBuilder sb = new StringBuilder();
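+    // each document body is 0-9 randomly chosen "aaa"/"bbb" terms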
+    for(int docCount=0;docCount<NUM_DOCS;docCount++) {
+      final int numTerms = random.nextInt(10);
+      for(int termCount=0;termCount<numTerms;termCount++) {
+        sb.append(random.nextBoolean() ? "aaa" : "bbb");
+        sb.append(' ');
+      }
+      body.setValue(sb.toString());
+      w.addDocument(doc);
+      sb.delete(0, sb.length());
+    }
+    final IndexReader r = w.getReader();
+    w.close();
+
+    final long endTime = System.currentTimeMillis();
+    if (VERBOSE) System.out.println("BUILD took " + (endTime-startTime));
+
+    final IndexSearcher s = newSearcher(r);
+
+    final AtomicBoolean failed = new AtomicBoolean();
+    final AtomicLong netSearch = new AtomicLong();
+
+    Thread[] threads = new Thread[NUM_SEARCH_THREADS];
+    for (int threadID = 0; threadID < NUM_SEARCH_THREADS; threadID++) {
+      threads[threadID] = new Thread() {
+        TotalHitCountCollector col = new TotalHitCountCollector();
+          @Override
+          public void run() {
+            try {
+              long totHits = 0;
+              long totSearch = 0;
+              long stopAt = System.currentTimeMillis() + RUN_TIME_MSEC;
+              while(System.currentTimeMillis() < stopAt && !failed.get()) {
+                s.search(new TermQuery(new Term("body", "aaa")), col);
+                totHits += col.getTotalHits();
+                s.search(new TermQuery(new Term("body", "bbb")), col);
+                totHits += col.getTotalHits();
+                totSearch++;
+              }
+              assertTrue(totSearch > 0 && totHits > 0);
+              netSearch.addAndGet(totSearch);
+            } catch (Exception exc) {
+              failed.set(true);
+              throw new RuntimeException(exc);
+            }
+          }
+        };
+      threads[threadID].setDaemon(true);
+    }
+
+    for (Thread t : threads) {
+      t.start();
+    }
+    
+    for (Thread t : threads) {
+      t.join();
+    }
+
+    if (VERBOSE) System.out.println(NUM_SEARCH_THREADS + " threads did " + netSearch.get() + " searches");
+
+    s.close();
+    r.close();
+    dir.close();
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/TestSetNorm.java b/lucene/backwards/src/test/org/apache/lucene/search/TestSetNorm.java
new file mode 100644
index 0000000..32631cc
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/TestSetNorm.java
@@ -0,0 +1,94 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.*;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.store.Directory;
+
+/** Document boost unit test.
+ *
+ *
+ * @version $Revision$
+ */
+public class TestSetNorm extends LuceneTestCase {
+
+  public void testSetNorm() throws Exception {
+    Directory store = newDirectory();
+    IndexWriter writer = new IndexWriter(store, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+
+    // add the same document four times
+    Fieldable f1 = newField("field", "word", Field.Store.YES, Field.Index.ANALYZED);
+    Document d1 = new Document();
+    d1.add(f1);
+    writer.addDocument(d1);
+    writer.addDocument(d1);
+    writer.addDocument(d1);
+    writer.addDocument(d1);
+    writer.close();
+
+    // reset the boost of each instance of this document
+    IndexReader reader = IndexReader.open(store, false);
+    reader.setNorm(0, "field", 1.0f);
+    reader.setNorm(1, "field", 2.0f);
+    reader.setNorm(2, "field", 4.0f);
+    reader.setNorm(3, "field", 16.0f);
+    reader.close();
+
+    // check that searches are ordered by this boost
+    final float[] scores = new float[4];
+
+    IndexSearcher is = new IndexSearcher(store, true);
+    is.search
+      (new TermQuery(new Term("field", "word")),
+       new Collector() {
+         private int base = 0;
+         private Scorer scorer;
+         @Override
+         public void setScorer(Scorer scorer) throws IOException {
+          this.scorer = scorer;
+         }
+         @Override
+         public final void collect(int doc) throws IOException {
+           scores[doc + base] = scorer.score();
+         }
+         @Override
+         public void setNextReader(IndexReader reader, int docBase) {
+           base = docBase;
+         }
+         @Override
+         public boolean acceptsDocsOutOfOrder() {
+           return true;
+         }
+       });
+    is.close();
+    float lastScore = 0.0f;
+
+    for (int i = 0; i < 4; i++) {
+      assertTrue(scores[i] > lastScore);
+      lastScore = scores[i];
+    }
+    store.close();
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/TestSimilarity.java b/lucene/backwards/src/test/org/apache/lucene/search/TestSimilarity.java
new file mode 100644
index 0000000..040c27e
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/TestSimilarity.java
@@ -0,0 +1,179 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.util.LuceneTestCase;
+import java.io.IOException;
+import java.util.Collection;
+
+import org.apache.lucene.index.FieldInvertState;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.search.Explanation.IDFExplanation;
+
+/** Similarity unit test.
+ *
+ *
+ * @version $Revision$
+ */
+public class TestSimilarity extends LuceneTestCase {
+  
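+  // a deliberately simple Similarity: tf is the raw frequency, sloppyFreq is a constant
+  // 2.0, and the remaining factors are 1.0 (the norm is just the field boost), so the
+  // expected scores asserted below can be computed by hand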
+  public static class SimpleSimilarity extends Similarity {
+    @Override public float computeNorm(String field, FieldInvertState state) { return state.getBoost(); }
+    @Override public float queryNorm(float sumOfSquaredWeights) { return 1.0f; }
+    @Override public float tf(float freq) { return freq; }
+    @Override public float sloppyFreq(int distance) { return 2.0f; }
+    @Override public float idf(int docFreq, int numDocs) { return 1.0f; }
+    @Override public float coord(int overlap, int maxOverlap) { return 1.0f; }
+    @Override public IDFExplanation idfExplain(Collection<Term> terms, Searcher searcher) throws IOException {
+      return new IDFExplanation() {
+        @Override
+        public float getIdf() {
+          return 1.0f;
+        }
+        @Override
+        public String explain() {
+          return "Inexplicable";
+        }
+      };
+    }
+  }
+
+  public void testSimilarity() throws Exception {
+    Directory store = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random, store, 
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
+        .setSimilarity(new SimpleSimilarity()));
+    
+    Document d1 = new Document();
+    d1.add(newField("field", "a c", Field.Store.YES, Field.Index.ANALYZED));
+
+    Document d2 = new Document();
+    d2.add(newField("field", "a b c", Field.Store.YES, Field.Index.ANALYZED));
+    
+    writer.addDocument(d1);
+    writer.addDocument(d2);
+    IndexReader reader = writer.getReader();
+    writer.close();
+
+    IndexSearcher searcher = newSearcher(reader);
+    searcher.setSimilarity(new SimpleSimilarity());
+
+    Term a = new Term("field", "a");
+    Term b = new Term("field", "b");
+    Term c = new Term("field", "c");
+
+    searcher.search(new TermQuery(b), new Collector() {
+         private Scorer scorer;
+         @Override
+        public void setScorer(Scorer scorer) throws IOException {
+           this.scorer = scorer; 
+         }
+         @Override
+        public final void collect(int doc) throws IOException {
+           assertEquals(1.0f, scorer.score());
+         }
+         @Override
+        public void setNextReader(IndexReader reader, int docBase) {}
+         @Override
+        public boolean acceptsDocsOutOfOrder() {
+           return true;
+         }
+       });
+
+    BooleanQuery bq = new BooleanQuery();
+    bq.add(new TermQuery(a), BooleanClause.Occur.SHOULD);
+    bq.add(new TermQuery(b), BooleanClause.Occur.SHOULD);
+    //System.out.println(bq.toString("field"));
+    searcher.search(bq, new Collector() {
+         private int base = 0;
+         private Scorer scorer;
+         @Override
+        public void setScorer(Scorer scorer) throws IOException {
+           this.scorer = scorer; 
+         }
+         @Override
+        public final void collect(int doc) throws IOException {
+           //System.out.println("Doc=" + doc + " score=" + score);
+           assertEquals((float)doc+base+1, scorer.score());
+         }
+         @Override
+        public void setNextReader(IndexReader reader, int docBase) {
+           base = docBase;
+         }
+         @Override
+        public boolean acceptsDocsOutOfOrder() {
+           return true;
+         }
+       });
+
+    PhraseQuery pq = new PhraseQuery();
+    pq.add(a);
+    pq.add(c);
+    //System.out.println(pq.toString("field"));
+    searcher.search(pq,
+       new Collector() {
+         private Scorer scorer;
+         @Override
+         public void setScorer(Scorer scorer) throws IOException {
+          this.scorer = scorer; 
+         }
+         @Override
+         public final void collect(int doc) throws IOException {
+           //System.out.println("Doc=" + doc + " score=" + score);
+           assertEquals(1.0f, scorer.score());
+         }
+         @Override
+         public void setNextReader(IndexReader reader, int docBase) {}
+         @Override
+         public boolean acceptsDocsOutOfOrder() {
+           return true;
+         }
+       });
+
+    pq.setSlop(2);
+    //System.out.println(pq.toString("field"));
+    searcher.search(pq, new Collector() {
+      private Scorer scorer;
+      @Override
+      public void setScorer(Scorer scorer) throws IOException {
+        this.scorer = scorer; 
+      }
+      @Override
+      public final void collect(int doc) throws IOException {
+        //System.out.println("Doc=" + doc + " score=" + score);
+        assertEquals(2.0f, scorer.score());
+      }
+      @Override
+      public void setNextReader(IndexReader reader, int docBase) {}
+      @Override
+      public boolean acceptsDocsOutOfOrder() {
+        return true;
+      }
+    });
+
+    searcher.close();
+    reader.close();
+    store.close();
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/TestSimpleExplanations.java b/lucene/backwards/src/test/org/apache/lucene/search/TestSimpleExplanations.java
new file mode 100644
index 0000000..45aec07
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/TestSimpleExplanations.java
@@ -0,0 +1,432 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.queryParser.QueryParser;
+import org.apache.lucene.search.spans.SpanNearQuery;
+import org.apache.lucene.search.spans.SpanQuery;
+import org.apache.lucene.search.spans.SpanTermQuery;
+import org.apache.lucene.store.Directory;
+
+
+/**
+ * TestExplanations subclass focusing on basic query types
+ */
+public class TestSimpleExplanations extends TestExplanations {
+
+  // we focus on queries that don't rewrite to other queries.
+  // if we get those covered well, then the ones that rewrite should
+  // also be covered.
+  
+
+  /* simple term tests */
+  
+  public void testT1() throws Exception {
+    qtest("w1", new int[] { 0,1,2,3 });
+  }
+  public void testT2() throws Exception {
+    qtest("w1^1000", new int[] { 0,1,2,3 });
+  }
+  
+  /* MatchAllDocs */
+  
+  public void testMA1() throws Exception {
+    qtest(new MatchAllDocsQuery(), new int[] { 0,1,2,3 });
+  }
+  public void testMA2() throws Exception {
+    Query q=new MatchAllDocsQuery();
+    q.setBoost(1000);
+    qtest(q, new int[] { 0,1,2,3 });
+  }
+
+  /* some simple phrase tests */
+  
+  public void testP1() throws Exception {
+    qtest("\"w1 w2\"", new int[] { 0 });
+  }
+  public void testP2() throws Exception {
+    qtest("\"w1 w3\"", new int[] { 1,3 });
+  }
+  public void testP3() throws Exception {
+    qtest("\"w1 w2\"~1", new int[] { 0,1,2 });
+  }
+  public void testP4() throws Exception {
+    qtest("\"w2 w3\"~1", new int[] { 0,1,2,3 });
+  }
+  public void testP5() throws Exception {
+    qtest("\"w3 w2\"~1", new int[] { 1,3 });
+  }
+  public void testP6() throws Exception {
+    qtest("\"w3 w2\"~2", new int[] { 0,1,3 });
+  }
+  public void testP7() throws Exception {
+    qtest("\"w3 w2\"~3", new int[] { 0,1,2,3 });
+  }
+
+  /* some simple filtered query tests */
+  
+  public void testFQ1() throws Exception {
+    qtest(new FilteredQuery(qp.parse("w1"),
+                            new ItemizedFilter(new int[] {0,1,2,3})),
+          new int[] {0,1,2,3});
+  }
+  public void testFQ2() throws Exception {
+    qtest(new FilteredQuery(qp.parse("w1"),
+                            new ItemizedFilter(new int[] {0,2,3})),
+          new int[] {0,2,3});
+  }
+  public void testFQ3() throws Exception {
+    qtest(new FilteredQuery(qp.parse("xx"),
+                            new ItemizedFilter(new int[] {1,3})),
+          new int[] {3});
+  }
+  public void testFQ4() throws Exception {
+    qtest(new FilteredQuery(qp.parse("xx^1000"),
+                            new ItemizedFilter(new int[] {1,3})),
+          new int[] {3});
+  }
+  public void testFQ6() throws Exception {
+    Query q = new FilteredQuery(qp.parse("xx"),
+                                new ItemizedFilter(new int[] {1,3}));
+    q.setBoost(1000);
+    qtest(q, new int[] {3});
+  }
+
+  /* ConstantScoreQueries */
+  
+  public void testCSQ1() throws Exception {
+    Query q = new ConstantScoreQuery(new ItemizedFilter(new int[] {0,1,2,3}));
+    qtest(q, new int[] {0,1,2,3});
+  }
+  public void testCSQ2() throws Exception {
+    Query q = new ConstantScoreQuery(new ItemizedFilter(new int[] {1,3}));
+    qtest(q, new int[] {1,3});
+  }
+  public void testCSQ3() throws Exception {
+    Query q = new ConstantScoreQuery(new ItemizedFilter(new int[] {0,2}));
+    q.setBoost(1000);
+    qtest(q, new int[] {0,2});
+  }
+  
+  /* DisjunctionMaxQuery */
+  
+  public void testDMQ1() throws Exception {
+    DisjunctionMaxQuery q = new DisjunctionMaxQuery(0.0f);
+    q.add(qp.parse("w1"));
+    q.add(qp.parse("w5"));
+    qtest(q, new int[] { 0,1,2,3 });
+  }
+  public void testDMQ2() throws Exception {
+    DisjunctionMaxQuery q = new DisjunctionMaxQuery(0.5f);
+    q.add(qp.parse("w1"));
+    q.add(qp.parse("w5"));
+    qtest(q, new int[] { 0,1,2,3 });
+  }
+  public void testDMQ3() throws Exception {
+    DisjunctionMaxQuery q = new DisjunctionMaxQuery(0.5f);
+    q.add(qp.parse("QQ"));
+    q.add(qp.parse("w5"));
+    qtest(q, new int[] { 0 });
+  }
+  public void testDMQ4() throws Exception {
+    DisjunctionMaxQuery q = new DisjunctionMaxQuery(0.5f);
+    q.add(qp.parse("QQ"));
+    q.add(qp.parse("xx"));
+    qtest(q, new int[] { 2,3 });
+  }
+  public void testDMQ5() throws Exception {
+    DisjunctionMaxQuery q = new DisjunctionMaxQuery(0.5f);
+    q.add(qp.parse("yy -QQ"));
+    q.add(qp.parse("xx"));
+    qtest(q, new int[] { 2,3 });
+  }
+  public void testDMQ6() throws Exception {
+    DisjunctionMaxQuery q = new DisjunctionMaxQuery(0.5f);
+    q.add(qp.parse("-yy w3"));
+    q.add(qp.parse("xx"));
+    qtest(q, new int[] { 0,1,2,3 });
+  }
+  public void testDMQ7() throws Exception {
+    DisjunctionMaxQuery q = new DisjunctionMaxQuery(0.5f);
+    q.add(qp.parse("-yy w3"));
+    q.add(qp.parse("w2"));
+    qtest(q, new int[] { 0,1,2,3 });
+  }
+  public void testDMQ8() throws Exception {
+    DisjunctionMaxQuery q = new DisjunctionMaxQuery(0.5f);
+    q.add(qp.parse("yy w5^100"));
+    q.add(qp.parse("xx^100000"));
+    qtest(q, new int[] { 0,2,3 });
+  }
+  public void testDMQ9() throws Exception {
+    DisjunctionMaxQuery q = new DisjunctionMaxQuery(0.5f);
+    q.add(qp.parse("yy w5^100"));
+    q.add(qp.parse("xx^0"));
+    qtest(q, new int[] { 0,2,3 });
+  }
+  
+  /* MultiPhraseQuery */
+  
+  public void testMPQ1() throws Exception {
+    MultiPhraseQuery q = new MultiPhraseQuery();
+    q.add(ta(new String[] {"w1"}));
+    q.add(ta(new String[] {"w2","w3", "xx"}));
+    qtest(q, new int[] { 0,1,2,3 });
+  }
+  public void testMPQ2() throws Exception {
+    MultiPhraseQuery q = new MultiPhraseQuery();
+    q.add(ta(new String[] {"w1"}));
+    q.add(ta(new String[] {"w2","w3"}));
+    qtest(q, new int[] { 0,1,3 });
+  }
+  public void testMPQ3() throws Exception {
+    MultiPhraseQuery q = new MultiPhraseQuery();
+    q.add(ta(new String[] {"w1","xx"}));
+    q.add(ta(new String[] {"w2","w3"}));
+    qtest(q, new int[] { 0,1,2,3 });
+  }
+  public void testMPQ4() throws Exception {
+    MultiPhraseQuery q = new MultiPhraseQuery();
+    q.add(ta(new String[] {"w1"}));
+    q.add(ta(new String[] {"w2"}));
+    qtest(q, new int[] { 0 });
+  }
+  public void testMPQ5() throws Exception {
+    MultiPhraseQuery q = new MultiPhraseQuery();
+    q.add(ta(new String[] {"w1"}));
+    q.add(ta(new String[] {"w2"}));
+    q.setSlop(1);
+    qtest(q, new int[] { 0,1,2 });
+  }
+  public void testMPQ6() throws Exception {
+    MultiPhraseQuery q = new MultiPhraseQuery();
+    q.add(ta(new String[] {"w1","w3"}));
+    q.add(ta(new String[] {"w2"}));
+    q.setSlop(1);
+    qtest(q, new int[] { 0,1,2,3 });
+  }
+
+  /* some simple tests of boolean queries containing term queries */
+  
+  public void testBQ1() throws Exception {
+    qtest("+w1 +w2", new int[] { 0,1,2,3 });
+  }
+  public void testBQ2() throws Exception {
+    qtest("+yy +w3", new int[] { 2,3 });
+  }
+  public void testBQ3() throws Exception {
+    qtest("yy +w3", new int[] { 0,1,2,3 });
+  }
+  public void testBQ4() throws Exception {
+    qtest("w1 (-xx w2)", new int[] { 0,1,2,3 });
+  }
+  public void testBQ5() throws Exception {
+    qtest("w1 (+qq w2)", new int[] { 0,1,2,3 });
+  }
+  public void testBQ6() throws Exception {
+    qtest("w1 -(-qq w5)", new int[] { 1,2,3 });
+  }
+  public void testBQ7() throws Exception {
+    qtest("+w1 +(qq (xx -w2) (+w3 +w4))", new int[] { 0 });
+  }
+  public void testBQ8() throws Exception {
+    qtest("+w1 (qq (xx -w2) (+w3 +w4))", new int[] { 0,1,2,3 });
+  }
+  public void testBQ9() throws Exception {
+    qtest("+w1 (qq (-xx w2) -(+w3 +w4))", new int[] { 0,1,2,3 });
+  }
+  public void testBQ10() throws Exception {
+    qtest("+w1 +(qq (-xx w2) -(+w3 +w4))", new int[] { 1 });
+  }
+  public void testBQ11() throws Exception {
+    qtest("w1 w2^1000.0", new int[] { 0,1,2,3 });
+  }
+  public void testBQ14() throws Exception {
+    BooleanQuery q = new BooleanQuery(true);
+    q.add(qp.parse("QQQQQ"), BooleanClause.Occur.SHOULD);
+    q.add(qp.parse("w1"), BooleanClause.Occur.SHOULD);
+    qtest(q, new int[] { 0,1,2,3 });
+  }
+  public void testBQ15() throws Exception {
+    BooleanQuery q = new BooleanQuery(true);
+    q.add(qp.parse("QQQQQ"), BooleanClause.Occur.MUST_NOT);
+    q.add(qp.parse("w1"), BooleanClause.Occur.SHOULD);
+    qtest(q, new int[] { 0,1,2,3 });
+  }
+  public void testBQ16() throws Exception {
+    BooleanQuery q = new BooleanQuery(true);
+    q.add(qp.parse("QQQQQ"), BooleanClause.Occur.SHOULD);
+    q.add(qp.parse("w1 -xx"), BooleanClause.Occur.SHOULD);
+    qtest(q, new int[] { 0,1 });
+  }
+  public void testBQ17() throws Exception {
+    BooleanQuery q = new BooleanQuery(true);
+    q.add(qp.parse("w2"), BooleanClause.Occur.SHOULD);
+    q.add(qp.parse("w1 -xx"), BooleanClause.Occur.SHOULD);
+    qtest(q, new int[] { 0,1,2,3 });
+  }
+  public void testBQ19() throws Exception {
+    qtest("-yy w3", new int[] { 0,1 });
+  }
+  
+  public void testBQ20() throws Exception {
+    BooleanQuery q = new BooleanQuery();
+    q.setMinimumNumberShouldMatch(2);
+    q.add(qp.parse("QQQQQ"), BooleanClause.Occur.SHOULD);
+    q.add(qp.parse("yy"), BooleanClause.Occur.SHOULD);
+    q.add(qp.parse("zz"), BooleanClause.Occur.SHOULD);
+    q.add(qp.parse("w5"), BooleanClause.Occur.SHOULD);
+    q.add(qp.parse("w4"), BooleanClause.Occur.SHOULD);
+    
+    qtest(q, new int[] { 0,3 });
+    
+  }
+  
+  public void testTermQueryMultiSearcherExplain() throws Exception {
+    // creating two directories for indices
+    Directory indexStoreA = newDirectory();
+    Directory indexStoreB = newDirectory();
+
+    Document lDoc = new Document();
+    lDoc.add(newField("handle", "1 2", Field.Store.YES, Field.Index.ANALYZED));
+    Document lDoc2 = new Document();
+    lDoc2.add(newField("handle", "1 2", Field.Store.YES, Field.Index.ANALYZED));
+    Document lDoc3 = new Document();
+    lDoc3.add(newField("handle", "1 2", Field.Store.YES, Field.Index.ANALYZED));
+
+    IndexWriter writerA = new IndexWriter(indexStoreA, newIndexWriterConfig(
+        TEST_VERSION_CURRENT, new StandardAnalyzer(
+        TEST_VERSION_CURRENT)));
+    IndexWriter writerB = new IndexWriter(indexStoreB, newIndexWriterConfig(
+        TEST_VERSION_CURRENT, new StandardAnalyzer(
+        TEST_VERSION_CURRENT)));
+
+    writerA.addDocument(lDoc);
+    writerA.addDocument(lDoc2);
+    writerA.optimize();
+    writerA.close();
+
+    writerB.addDocument(lDoc3);
+    writerB.close();
+
+    QueryParser parser = new QueryParser(TEST_VERSION_CURRENT, "fulltext", new StandardAnalyzer(TEST_VERSION_CURRENT));
+    Query query = parser.parse("handle:1");
+
+    Searcher[] searchers = new Searcher[2];
+    searchers[0] = new IndexSearcher(indexStoreB, true);
+    searchers[1] = new IndexSearcher(indexStoreA, true);
+    Searcher mSearcher = new MultiSearcher(searchers);
+    ScoreDoc[] hits = mSearcher.search(query, null, 1000).scoreDocs;
+
+    assertEquals(3, hits.length);
+
+    Explanation explain = mSearcher.explain(query, hits[0].doc);
+    String exp = explain.toString(0);
+    assertTrue(exp, exp.indexOf("maxDocs=3") > -1);
+    assertTrue(exp, exp.indexOf("docFreq=3") > -1);
+    
+    query = parser.parse("handle:\"1 2\"");
+    hits = mSearcher.search(query, null, 1000).scoreDocs;
+
+    assertEquals(3, hits.length);
+
+    explain = mSearcher.explain(query, hits[0].doc);
+    exp = explain.toString(0);
+    assertTrue(exp, exp.indexOf("1=3") > -1);
+    assertTrue(exp, exp.indexOf("2=3") > -1);
+    
+    query = new SpanNearQuery(new SpanQuery[] {
+        new SpanTermQuery(new Term("handle", "1")),
+        new SpanTermQuery(new Term("handle", "2")) }, 0, true);
+    hits = mSearcher.search(query, null, 1000).scoreDocs;
+
+    assertEquals(3, hits.length);
+
+    explain = mSearcher.explain(query, hits[0].doc);
+    exp = explain.toString(0);
+    assertTrue(exp, exp.indexOf("1=3") > -1);
+    assertTrue(exp, exp.indexOf("2=3") > -1);
+    mSearcher.close();
+    indexStoreA.close();
+    indexStoreB.close();
+  }
+
+  /* BQ of TQ: using alt so some fields have zero boost and some don't */
+  
+  public void testMultiFieldBQ1() throws Exception {
+    qtest("+w1 +alt:w2", new int[] { 0,1,2,3 });
+  }
+  public void testMultiFieldBQ2() throws Exception {
+    qtest("+yy +alt:w3", new int[] { 2,3 });
+  }
+  public void testMultiFieldBQ3() throws Exception {
+    qtest("yy +alt:w3", new int[] { 0,1,2,3 });
+  }
+  public void testMultiFieldBQ4() throws Exception {
+    qtest("w1 (-xx alt:w2)", new int[] { 0,1,2,3 });
+  }
+  public void testMultiFieldBQ5() throws Exception {
+    qtest("w1 (+alt:qq alt:w2)", new int[] { 0,1,2,3 });
+  }
+  public void testMultiFieldBQ6() throws Exception {
+    qtest("w1 -(-alt:qq alt:w5)", new int[] { 1,2,3 });
+  }
+  public void testMultiFieldBQ7() throws Exception {
+    qtest("+w1 +(alt:qq (alt:xx -alt:w2) (+alt:w3 +alt:w4))", new int[] { 0 });
+  }
+  public void testMultiFieldBQ8() throws Exception {
+    qtest("+alt:w1 (qq (alt:xx -w2) (+alt:w3 +w4))", new int[] { 0,1,2,3 });
+  }
+  public void testMultiFieldBQ9() throws Exception {
+    qtest("+w1 (alt:qq (-xx w2) -(+alt:w3 +w4))", new int[] { 0,1,2,3 });
+  }
+  public void testMultiFieldBQ10() throws Exception {
+    qtest("+w1 +(alt:qq (-xx alt:w2) -(+alt:w3 +w4))", new int[] { 1 });
+  }
+
+  /* BQ of PQ: using alt so some fields have zero boost and some don't */
+  
+  public void testMultiFieldBQofPQ1() throws Exception {
+    qtest("\"w1 w2\" alt:\"w1 w2\"", new int[] { 0 });
+  }
+  public void testMultiFieldBQofPQ2() throws Exception {
+    qtest("\"w1 w3\" alt:\"w1 w3\"", new int[] { 1,3 });
+  }
+  public void testMultiFieldBQofPQ3() throws Exception {
+    qtest("\"w1 w2\"~1 alt:\"w1 w2\"~1", new int[] { 0,1,2 });
+  }
+  public void testMultiFieldBQofPQ4() throws Exception {
+    qtest("\"w2 w3\"~1 alt:\"w2 w3\"~1", new int[] { 0,1,2,3 });
+  }
+  public void testMultiFieldBQofPQ5() throws Exception {
+    qtest("\"w3 w2\"~1 alt:\"w3 w2\"~1", new int[] { 1,3 });
+  }
+  public void testMultiFieldBQofPQ6() throws Exception {
+    qtest("\"w3 w2\"~2 alt:\"w3 w2\"~2", new int[] { 0,1,3 });
+  }
+  public void testMultiFieldBQofPQ7() throws Exception {
+    qtest("\"w3 w2\"~3 alt:\"w3 w2\"~3", new int[] { 0,1,2,3 });
+  }
+
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/TestSimpleExplanationsOfNonMatches.java b/lucene/backwards/src/test/org/apache/lucene/search/TestSimpleExplanationsOfNonMatches.java
new file mode 100644
index 0000000..ca1e3a7
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/TestSimpleExplanationsOfNonMatches.java
@@ -0,0 +1,39 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+
+
+/**
+ * Subclass of TestSimpleExplanations that verifies non-matches.
+ */
+public class TestSimpleExplanationsOfNonMatches
+  extends TestSimpleExplanations {
+
+  /**
+   * Overrides superclass to ignore matches and focus on non-matches
+   *
+   * @see CheckHits#checkNoMatchExplanations
+   */
+  @Override
+  public void qtest(Query q, int[] expDocNrs) throws Exception {
+    CheckHits.checkNoMatchExplanations(q, FIELD, searcher, expDocNrs);
+  }
+    
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/TestSloppyPhraseQuery.java b/lucene/backwards/src/test/org/apache/lucene/search/TestSloppyPhraseQuery.java
new file mode 100755
index 0000000..2280cf0
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/TestSloppyPhraseQuery.java
@@ -0,0 +1,155 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.MockTokenizer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.PhraseQuery;
+import org.apache.lucene.store.Directory;
+
+public class TestSloppyPhraseQuery extends LuceneTestCase {
+
+  private static final String S_1 = "A A A";
+  private static final String S_2 = "A 1 2 3 A 4 5 6 A";
+
+  private static final Document DOC_1 = makeDocument("X " + S_1 + " Y");
+  private static final Document DOC_2 = makeDocument("X " + S_2 + " Y");
+  private static final Document DOC_3 = makeDocument("X " + S_1 + " A Y");
+  private static final Document DOC_1_B = makeDocument("X " + S_1 + " Y N N N N " + S_1 + " Z");
+  private static final Document DOC_2_B = makeDocument("X " + S_2 + " Y N N N N " + S_2 + " Z");
+  private static final Document DOC_3_B = makeDocument("X " + S_1 + " A Y N N N N " + S_1 + " A Y");
+  private static final Document DOC_4 = makeDocument("A A X A X B A X B B A A X B A A");
+
+  private static final PhraseQuery QUERY_1 = makePhraseQuery( S_1 );
+  private static final PhraseQuery QUERY_2 = makePhraseQuery( S_2 );
+  private static final PhraseQuery QUERY_4 = makePhraseQuery( "X A A");
+
+  /**
+   * Test DOC_4 and QUERY_4.
+   * QUERY_4 has a fuzzy (len=1) match to DOC_4, so all slop values > 0 should succeed.
+   * But only the 3rd sequence of A's in DOC_4 will do.
+   */
+  public void testDoc4_Query4_All_Slops_Should_match() throws Exception {
+    for (int slop=0; slop<30; slop++) {
+      int numResultsExpected = slop<1 ? 0 : 1;
+      checkPhraseQuery(DOC_4, QUERY_4, slop, numResultsExpected);
+    }
+  }
+
+  /**
+   * Test DOC_1 and QUERY_1.
+   * QUERY_1 has an exact match to DOC_1, so all slop values should succeed.
+   * Before LUCENE-1310, a slop value of 1 did not succeed.
+   */
+  public void testDoc1_Query1_All_Slops_Should_match() throws Exception {
+    for (int slop=0; slop<30; slop++) {
+      float score1 = checkPhraseQuery(DOC_1, QUERY_1, slop, 1);
+      float score2 = checkPhraseQuery(DOC_1_B, QUERY_1, slop, 1);
+      assertTrue("slop="+slop+" score2="+score2+" should be greater than score1 "+score1, score2>score1);
+    }
+  }
+
+  /**
+   * Test DOC_2 and QUERY_1.
+   * 6 should be the minimum slop to make QUERY_1 match DOC_2.
+   * Before LUCENE-1310, 7 was the minimum.
+   */
+  public void testDoc2_Query1_Slop_6_or_more_Should_match() throws Exception {
+    for (int slop=0; slop<30; slop++) {
+      int numResultsExpected = slop<6 ? 0 : 1;
+      float score1 = checkPhraseQuery(DOC_2, QUERY_1, slop, numResultsExpected);
+      if (numResultsExpected>0) {
+        float score2 = checkPhraseQuery(DOC_2_B, QUERY_1, slop, 1);
+        assertTrue("slop="+slop+" score2="+score2+" should be greater than score1 "+score1, score2>score1);
+      }
+    }
+  }
+
+  /**
+   * Test DOC_2 and QUERY_2.
+   * QUERY_2 has an exact match to DOC_2, so all slop values should succeed.
+   * Before LUCENE-1310, a slop of 0 succeeded, 1 through 7 failed, and 8 or greater succeeded.
+   */
+  public void testDoc2_Query2_All_Slops_Should_match() throws Exception {
+    for (int slop=0; slop<30; slop++) {
+      float score1 = checkPhraseQuery(DOC_2, QUERY_2, slop, 1);
+      float score2 = checkPhraseQuery(DOC_2_B, QUERY_2, slop, 1);
+      assertTrue("slop="+slop+" score2="+score2+" should be greater than score1 "+score1, score2>score1);
+    }
+  }
+
+  /**
+   * Test DOC_3 and QUERY_1.
+   * QUERY_1 has an exact match to DOC_3, so all slop values should succeed.
+   */
+  public void testDoc3_Query1_All_Slops_Should_match() throws Exception {
+    for (int slop=0; slop<30; slop++) {
+      float score1 = checkPhraseQuery(DOC_3, QUERY_1, slop, 1);
+      float score2 = checkPhraseQuery(DOC_3_B, QUERY_1, slop, 1);
+      assertTrue("slop="+slop+" score2="+score2+" should be greater than score1 "+score1, score2>score1);
+    }
+  }
+
+  private float  checkPhraseQuery(Document doc, PhraseQuery query, int slop, int expectedNumResults) throws Exception {
+    query.setSlop(slop);
+
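+    // index the single document, run the sloppy phrase query against it, assert the
+    // hit count, and return the max score so callers can compare scores across documents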
+    Directory ramDir = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random, ramDir, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false));
+    writer.addDocument(doc);
+
+    IndexReader reader = writer.getReader();
+
+    IndexSearcher searcher = newSearcher(reader);
+    TopDocs td = searcher.search(query,null,10);
+    //System.out.println("slop: "+slop+"  query: "+query+"  doc: "+doc+"  Expecting number of hits: "+expectedNumResults+" maxScore="+td.getMaxScore());
+    assertEquals("slop: "+slop+"  query: "+query+"  doc: "+doc+"  Wrong number of hits", expectedNumResults, td.totalHits);
+
+    //QueryUtils.check(query,searcher);
+    writer.close();
+    searcher.close();
+    reader.close();
+    ramDir.close();
+
+    return td.getMaxScore();
+  }
+
+  private static Document makeDocument(String docText) {
+    Document doc = new Document();
+    Field f = new Field("f", docText, Field.Store.NO, Field.Index.ANALYZED);
+    f.setOmitNorms(true);
+    doc.add(f);
+    return doc;
+  }
+
+  private static PhraseQuery makePhraseQuery(String terms) {
+    PhraseQuery query = new PhraseQuery();
+    String[] t = terms.split(" +");
+    for (int i=0; i<t.length; i++) {
+      query.add(new Term("f", t[i]));
+    }
+    return query;
+  }
+
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/TestSort.java b/lucene/backwards/src/test/org/apache/lucene/search/TestSort.java
new file mode 100644
index 0000000..e8e5e14
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/TestSort.java
@@ -0,0 +1,1251 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.io.Serializable;
+import java.text.Collator;
+import java.util.ArrayList;
+import java.util.BitSet;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Locale;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.CorruptIndexException;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.queryParser.ParseException;
+import org.apache.lucene.search.BooleanClause.Occur;
+import org.apache.lucene.search.FieldValueHitQueue.Entry;
+import org.apache.lucene.store.LockObtainFailedException;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.DocIdBitSet;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util._TestUtil;
+import org.junit.BeforeClass;
+
+/**
+ * Unit tests for sorting code.
+ *
+ * <p>Created: Feb 17, 2004 4:55:10 PM
+ *
+ * @since   lucene 1.4
+ */
+
+public class TestSort extends LuceneTestCase implements Serializable {
+
+  private static int NUM_STRINGS;
+  private IndexSearcher full;
+  private IndexSearcher searchX;
+  private IndexSearcher searchY;
+  private Query queryX;
+  private Query queryY;
+  private Query queryA;
+  private Query queryE;
+  private Query queryF;
+  private Query queryG;
+  private Query queryM;
+  private Sort sort;
+
+  @BeforeClass
+  public static void beforeClass() throws Exception {
+    NUM_STRINGS = atLeast(6000);
+  }
+  // document data:
+  // the tracer field is used to determine which document was hit
+  // the contents field is used to search and sort by relevance
+  // the int field to sort by int
+  // the float field to sort by float
+  // the string field to sort by string
+  // the i18n field includes accented characters for testing locale-specific sorting
+  private String[][] data = new String[][] {
+  // tracer  contents         int            float           string   custom   i18n               long            double,          short,     byte, 'custom parser encoding'
+  {   "A",   "x a",           "5",           "4f",           "c",     "A-3",   "p\u00EAche",      "10",           "-4.0",            "3",    "126", "J"},//A, x
+  {   "B",   "y a",           "5",           "3.4028235E38", "i",     "B-10",  "HAT",             "1000000000",   "40.0",           "24",      "1", "I"},//B, y
+  {   "C",   "x a b c",       "2147483647",  "1.0",          "j",     "A-2",   "p\u00E9ch\u00E9", "99999999","40.00002343",        "125",     "15", "H"},//C, x
+  {   "D",   "y a b c",       "-1",          "0.0f",         "a",     "C-0",   "HUT",   String.valueOf(Long.MAX_VALUE),String.valueOf(Double.MIN_VALUE), String.valueOf(Short.MIN_VALUE), String.valueOf(Byte.MIN_VALUE), "G"},//D, y
+  {   "E",   "x a b c d",     "5",           "2f",           "h",     "B-8",   "peach", String.valueOf(Long.MIN_VALUE),String.valueOf(Double.MAX_VALUE), String.valueOf(Short.MAX_VALUE),           String.valueOf(Byte.MAX_VALUE), "F"},//E,x
+  {   "F",   "y a b c d",     "2",           "3.14159f",     "g",     "B-1",   "H\u00C5T",        "-44",          "343.034435444",  "-3",      "0", "E"},//F,y
+  {   "G",   "x a b c d",     "3",           "-1.0",         "f",     "C-100", "sin",             "323254543543", "4.043544",        "5",    "100", "D"},//G,x
+  {   "H",   "y a b c d",     "0",           "1.4E-45",      "e",     "C-88",  "H\u00D8T",        "1023423423005","4.043545",       "10",    "-50", "C"},//H,y
+  {   "I",   "x a b c d e f", "-2147483648", "1.0e+0",       "d",     "A-10",  "s\u00EDn",        "332422459999", "4.043546",     "-340",     "51", "B"},//I,x
+  {   "J",   "y a b c d e f", "4",           ".5",           "b",     "C-7",   "HOT",             "34334543543",  "4.0000220343",  "300",      "2", "A"},//J,y
+  {   "W",   "g",             "1",           null,           null,    null,    null,              null,           null, null, null, null},
+  {   "X",   "g",             "1",           "0.1",          null,    null,    null,              null,           null, null, null, null},
+  {   "Y",   "g",             "1",           "0.2",          null,    null,    null,              null,           null, null, null, null},
+  {   "Z",   "f g",           null,          null,           null,    null,    null,              null,           null, null, null, null},
+  
+  // Sort Missing first/last
+  {   "a",   "m",            null,          null,           null,    null,    null,              null,           null, null, null, null},
+  {   "b",   "m",            "4",           "4.0",           "4",    null,    null,              "4",           "4", "4", "4", null},
+  {   "c",   "m",            "5",           "5.0",           "5",    null,    null,              "5",           "5", "5", "5", null},
+  {   "d",   "m",            null,          null,           null,    null,    null,              null,           null, null, null, null}
+  }; 
+  
+  // the sort order of Ø versus U depends on the version of the rules being used
+  // for the inherited root locale: Ø's order isn't specified in Locale.US since
+  // it's not used in English.
+  private boolean oStrokeFirst = Collator.getInstance(new Locale("")).compare("Ø", "U") < 0;
+  
+  // create an index of all the documents, or just the x, or just the y documents
+  private IndexSearcher getIndex (boolean even, boolean odd)
+  throws IOException {
+    Directory indexStore = newDirectory();
+    dirs.add(indexStore);
+    RandomIndexWriter writer = new RandomIndexWriter(random, indexStore, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
+
+    for (int i=0; i<data.length; ++i) {
+      if (((i%2)==0 && even) || ((i%2)==1 && odd)) {
+        Document doc = new Document();
+        doc.add (new Field ("tracer",   data[i][0], Field.Store.YES, Field.Index.NO));
+        doc.add (new Field ("contents", data[i][1], Field.Store.NO, Field.Index.ANALYZED));
+        if (data[i][2] != null) doc.add (new Field ("int",      data[i][2], Field.Store.NO, Field.Index.NOT_ANALYZED));
+        if (data[i][3] != null) doc.add (new Field ("float",    data[i][3], Field.Store.NO, Field.Index.NOT_ANALYZED));
+        if (data[i][4] != null) doc.add (new Field ("string",   data[i][4], Field.Store.NO, Field.Index.NOT_ANALYZED));
+        if (data[i][5] != null) doc.add (new Field ("custom",   data[i][5], Field.Store.NO, Field.Index.NOT_ANALYZED));
+        if (data[i][6] != null) doc.add (new Field ("i18n",     data[i][6], Field.Store.NO, Field.Index.NOT_ANALYZED));
+        if (data[i][7] != null) doc.add (new Field ("long",     data[i][7], Field.Store.NO, Field.Index.NOT_ANALYZED));
+        if (data[i][8] != null) doc.add (new Field ("double",     data[i][8], Field.Store.NO, Field.Index.NOT_ANALYZED));
+        if (data[i][9] != null) doc.add (new Field ("short",     data[i][9], Field.Store.NO, Field.Index.NOT_ANALYZED));
+        if (data[i][10] != null) doc.add (new Field ("byte",     data[i][10], Field.Store.NO, Field.Index.NOT_ANALYZED));
+        if (data[i][11] != null) doc.add (new Field ("parser",     data[i][11], Field.Store.NO, Field.Index.NOT_ANALYZED));
+        doc.setBoost(2);  // produce some scores above 1.0
+        writer.addDocument (doc);
+      }
+    }
+    IndexReader reader = writer.getReader();
+    writer.close ();
+    IndexSearcher s = newSearcher(reader);
+    s.setDefaultFieldSortScoring(true, true);
+    return s;
+  }
+
+  private IndexSearcher getFullIndex()
+  throws IOException {
+    return getIndex (true, true);
+  }
+  
+  private IndexSearcher getFullStrings() throws CorruptIndexException, LockObtainFailedException, IOException {
+    Directory indexStore = newDirectory();
+    dirs.add(indexStore);
+    IndexWriter writer = new IndexWriter(
+        indexStore,
+        new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+            setMaxBufferedDocs(4).
+            setMergePolicy(newLogMergePolicy(97))
+    );
+    for (int i=0; i<NUM_STRINGS; i++) {
+        Document doc = new Document();
+        String num = getRandomCharString(getRandomNumber(2, 8), 48, 52);
+        doc.add (new Field ("tracer", num, Field.Store.YES, Field.Index.NO));
+        //doc.add (new Field ("contents", Integer.toString(i), Field.Store.NO, Field.Index.ANALYZED));
+        doc.add (new Field ("string", num, Field.Store.NO, Field.Index.NOT_ANALYZED));
+        String num2 = getRandomCharString(getRandomNumber(1, 4), 48, 50);
+        doc.add (new Field ("string2", num2, Field.Store.NO, Field.Index.NOT_ANALYZED));
+        doc.add (new Field ("tracer2", num2, Field.Store.YES, Field.Index.NO));
+        doc.setBoost(2);  // produce some scores above 1.0
+        writer.addDocument (doc);
+      
+    }
+    //writer.optimize ();
+    //System.out.println(writer.getSegmentCount());
+    writer.close ();
+    return new IndexSearcher (indexStore, true);
+  }
+  
+  public String getRandomNumberString(int num, int low, int high) {
+    StringBuilder sb = new StringBuilder();
+    for (int i = 0; i < num; i++) {
+      sb.append(getRandomNumber(low, high));
+    }
+    return sb.toString();
+  }
+  
+  public String getRandomCharString(int num) {
+    return getRandomCharString(num, 48, 122);
+  }
+  
+  public String getRandomCharString(int num, int start, int end) {
+    StringBuilder sb = new StringBuilder();
+    for (int i = 0; i < num; i++) {
+      sb.append(new Character((char) getRandomNumber(start, end)));
+    }
+    return sb.toString();
+  }
+  
+  public int getRandomNumber(final int low, final int high) {
+  
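+    // pseudo-random int in [low, high): 'high' itself is never returned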
+    int randInt = (Math.abs(random.nextInt()) % (high - low)) + low;
+
+    return randInt;
+  }
+
+  private IndexSearcher getXIndex()
+  throws IOException {
+    return getIndex (true, false);
+  }
+
+  private IndexSearcher getYIndex()
+  throws IOException {
+    return getIndex (false, true);
+  }
+
+  private IndexSearcher getEmptyIndex()
+  throws IOException {
+    return getIndex (false, false);
+  }
+
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    full = getFullIndex();
+    searchX = getXIndex();
+    searchY = getYIndex();
+    queryX = new TermQuery (new Term ("contents", "x"));
+    queryY = new TermQuery (new Term ("contents", "y"));
+    queryA = new TermQuery (new Term ("contents", "a"));
+    queryE = new TermQuery (new Term ("contents", "e"));
+    queryF = new TermQuery (new Term ("contents", "f"));
+    queryG = new TermQuery (new Term ("contents", "g"));
+    queryM = new TermQuery (new Term ("contents", "m"));
+    sort = new Sort();
+  }
+  
+  private ArrayList<Directory> dirs = new ArrayList<Directory>();
+  
+  @Override
+  public void tearDown() throws Exception {
+    full.reader.close();
+    searchX.reader.close();
+    searchY.reader.close();
+    full.close();
+    searchX.close();
+    searchY.close();
+    for (Directory dir : dirs)
+      dir.close();
+    super.tearDown();
+  }
+
+  // test the sorts by score and document number
+  public void testBuiltInSorts() throws Exception {
+    sort = new Sort();
+    assertMatches (full, queryX, sort, "ACEGI");
+    assertMatches (full, queryY, sort, "BDFHJ");
+
+    sort.setSort(SortField.FIELD_DOC);
+    assertMatches (full, queryX, sort, "ACEGI");
+    assertMatches (full, queryY, sort, "BDFHJ");
+  }
+
+  // test sorts where the type of field is specified
+  public void testTypedSort() throws Exception {
+    sort.setSort (new SortField ("int", SortField.INT), SortField.FIELD_DOC );
+    assertMatches (full, queryX, sort, "IGAEC");
+    assertMatches (full, queryY, sort, "DHFJB");
+
+    sort.setSort (new SortField ("float", SortField.FLOAT), SortField.FIELD_DOC );
+    assertMatches (full, queryX, sort, "GCIEA");
+    assertMatches (full, queryY, sort, "DHJFB");
+
+    sort.setSort (new SortField ("long", SortField.LONG), SortField.FIELD_DOC );
+    assertMatches (full, queryX, sort, "EACGI");
+    assertMatches (full, queryY, sort, "FBJHD");
+
+    sort.setSort (new SortField ("double", SortField.DOUBLE), SortField.FIELD_DOC );
+    assertMatches (full, queryX, sort, "AGICE");
+    assertMatches (full, queryY, sort, "DJHBF");
+
+    sort.setSort (new SortField ("byte", SortField.BYTE), SortField.FIELD_DOC );
+    assertMatches (full, queryX, sort, "CIGAE");
+    assertMatches (full, queryY, sort, "DHFBJ");
+
+    sort.setSort (new SortField ("short", SortField.SHORT), SortField.FIELD_DOC );
+    assertMatches (full, queryX, sort, "IAGCE");
+    assertMatches (full, queryY, sort, "DFHBJ");
+
+    sort.setSort (new SortField ("string", SortField.STRING), SortField.FIELD_DOC );
+    assertMatches (full, queryX, sort, "AIGEC");
+    assertMatches (full, queryY, sort, "DJHFB");
+  }
+  
+  private static class SortMissingLastTestHelper {
+    final SortField sortField;
+    final Object min;
+    final Object max;
+    
+    SortMissingLastTestHelper( SortField sortField, Object min, Object max ) {
+      this.sortField = sortField;
+      this.min = min;
+      this.max = max;
+    }
+  }
+
+  // test sorts where the type of field is specified
+  public void testSortMissingLast() throws Exception {
+    
+    @SuppressWarnings("boxing")
+    SortMissingLastTestHelper[] ascendTesters = new SortMissingLastTestHelper[] {
+        new SortMissingLastTestHelper( new SortField(   "byte",   SortField.BYTE ), Byte.MIN_VALUE,    Byte.MAX_VALUE ),
+        new SortMissingLastTestHelper( new SortField(  "short",  SortField.SHORT ), Short.MIN_VALUE,   Short.MAX_VALUE ),
+        new SortMissingLastTestHelper( new SortField(    "int",    SortField.INT ), Integer.MIN_VALUE, Integer.MAX_VALUE ),
+        new SortMissingLastTestHelper( new SortField(   "long",   SortField.LONG ), Long.MIN_VALUE,    Long.MAX_VALUE ),
+        new SortMissingLastTestHelper( new SortField(  "float",  SortField.FLOAT ), Float.MIN_VALUE,   Float.MAX_VALUE ),
+        new SortMissingLastTestHelper( new SortField( "double", SortField.DOUBLE ), Double.MIN_VALUE,  Double.MAX_VALUE ),
+    };
+    
+    @SuppressWarnings("boxing")
+    SortMissingLastTestHelper[] descendTesters = new SortMissingLastTestHelper[] {
+      new SortMissingLastTestHelper( new SortField(   "byte",   SortField.BYTE, true ), Byte.MIN_VALUE,    Byte.MAX_VALUE ),
+      new SortMissingLastTestHelper( new SortField(  "short",  SortField.SHORT, true ), Short.MIN_VALUE,   Short.MAX_VALUE ),
+      new SortMissingLastTestHelper( new SortField(    "int",    SortField.INT, true ), Integer.MIN_VALUE, Integer.MAX_VALUE ),
+      new SortMissingLastTestHelper( new SortField(   "long",   SortField.LONG, true ), Long.MIN_VALUE,    Long.MAX_VALUE ),
+      new SortMissingLastTestHelper( new SortField(  "float",  SortField.FLOAT, true ), Float.MIN_VALUE,   Float.MAX_VALUE ),
+      new SortMissingLastTestHelper( new SortField( "double", SortField.DOUBLE, true ), Double.MIN_VALUE,  Double.MAX_VALUE ),
+    };
+    
+    // Default order: ascending
+    for( SortMissingLastTestHelper t : ascendTesters ) {
+      sort.setSort (t.sortField, SortField.FIELD_DOC );
+      assertMatches("sortField:"+t.sortField, full, queryM, sort, "adbc" );
+
+      sort.setSort (t.sortField.setMissingValue( t.max ), SortField.FIELD_DOC );
+      assertMatches("sortField:"+t.sortField, full, queryM, sort, "bcad" );
+
+      sort.setSort (t.sortField.setMissingValue( t.min ), SortField.FIELD_DOC );
+      assertMatches("sortField:"+t.sortField, full, queryM, sort, "adbc" );
+    }
+    
+    // Reverse order: descending (Note: Order for un-valued documents remains the same due to tie breaker: a,d)
+    for( SortMissingLastTestHelper t : descendTesters ) {
+      sort.setSort (t.sortField, SortField.FIELD_DOC );
+      assertMatches("sortField:"+t.sortField, full, queryM, sort, "cbad" );
+      
+      sort.setSort (t.sortField.setMissingValue( t.max ), SortField.FIELD_DOC );
+      assertMatches("sortField:"+t.sortField, full, queryM, sort, "adcb" );
+      
+      sort.setSort (t.sortField.setMissingValue( t.min ), SortField.FIELD_DOC );
+      assertMatches("sortField:"+t.sortField, full, queryM, sort, "cbad" );
+    }
+    
+    
+  }
+  
+  /**
+   * Test String sorting: small queue with many matches, multi-field sort, reverse sort
+   */
+  public void testStringSort() throws IOException, ParseException {
+    ScoreDoc[] result = null;
+    IndexSearcher searcher = getFullStrings();
+    sort.setSort(
+        new SortField("string", SortField.STRING),
+        new SortField("string2", SortField.STRING, true),
+        SortField.FIELD_DOC );
+
+    result = searcher.search(new MatchAllDocsQuery(), null, 500, sort).scoreDocs;
+
+    StringBuilder buff = new StringBuilder();
+    int n = result.length;
+    String last = null;
+    String lastSub = null;
+    int lastDocId = 0;
+    boolean fail = false;
+    for (int x = 0; x < n; ++x) {
+      Document doc2 = searcher.doc(result[x].doc);
+      String[] v = doc2.getValues("tracer");
+      String[] v2 = doc2.getValues("tracer2");
+      for (int j = 0; j < v.length; ++j) {
+        if (last != null) {
+          int cmp = v[j].compareTo(last);
+          if (!(cmp >= 0)) { // ensure first field is in order
+            fail = true;
+            System.out.println("fail:" + v[j] + " < " + last);
+          }
+          if (cmp == 0) { // ensure second field is in reverse order
+            cmp = v2[j].compareTo(lastSub);
+            if (cmp > 0) {
+              fail = true;
+              System.out.println("rev field fail:" + v2[j] + " > " + lastSub);
+            } else if(cmp == 0) { // ensure docid is in order
+              if (result[x].doc < lastDocId) {
+                fail = true;
+                System.out.println("doc fail:" + result[x].doc + " > " + lastDocId);
+              }
+            }
+          }
+        }
+        last = v[j];
+        lastSub = v2[j];
+        lastDocId = result[x].doc;
+        buff.append(v[j] + "(" + v2[j] + ")(" + result[x].doc+") ");
+      }
+    }
+    if(fail) {
+      System.out.println("topn field1(field2)(docID):" + buff);
+    }
+    assertFalse("Found sort results out of order", fail);
+    searcher.close();
+  }
+  
+  /** 
+   * Test sorts where the field type is specified and a custom field parser
+   * with a simple char encoding is used. The sorted string contains a
+   * character starting at 'A' that is mapped to a numeric value using a
+   * different "funny" algorithm for each data type.
+   */
+  public void testCustomFieldParserSort() throws Exception {
+    // since tests explicilty uses different parsers on the same fieldname
+    // we explicitly check/purge the FieldCache between each assertMatch
+    FieldCache fc = FieldCache.DEFAULT;
+
+
+    sort.setSort (new SortField ("parser", new FieldCache.IntParser(){
+      public final int parseInt(final String val) {
+        return (val.charAt(0)-'A') * 123456;
+      }
+    }), SortField.FIELD_DOC );
+    assertMatches (full, queryA, sort, "JIHGFEDCBA");
+    assertSaneFieldCaches(getName() + " IntParser");
+    fc.purgeAllCaches();
+
+    sort.setSort (new SortField ("parser", new FieldCache.FloatParser(){
+      public final float parseFloat(final String val) {
+        return (float) Math.sqrt( val.charAt(0) );
+      }
+    }), SortField.FIELD_DOC );
+    assertMatches (full, queryA, sort, "JIHGFEDCBA");
+    assertSaneFieldCaches(getName() + " FloatParser");
+    fc.purgeAllCaches();
+
+    sort.setSort (new SortField ("parser", new FieldCache.LongParser(){
+      public final long parseLong(final String val) {
+        return (val.charAt(0)-'A') * 1234567890L;
+      }
+    }), SortField.FIELD_DOC );
+    assertMatches (full, queryA, sort, "JIHGFEDCBA");
+    assertSaneFieldCaches(getName() + " LongParser");
+    fc.purgeAllCaches();
+
+    sort.setSort (new SortField ("parser", new FieldCache.DoubleParser(){
+      public final double parseDouble(final String val) {
+        return Math.pow( val.charAt(0), (val.charAt(0)-'A') );
+      }
+    }), SortField.FIELD_DOC );
+    assertMatches (full, queryA, sort, "JIHGFEDCBA");
+    assertSaneFieldCaches(getName() + " DoubleParser");
+    fc.purgeAllCaches();
+
+    sort.setSort (new SortField ("parser", new FieldCache.ByteParser(){
+      public final byte parseByte(final String val) {
+        return (byte) (val.charAt(0)-'A');
+      }
+    }), SortField.FIELD_DOC );
+    assertMatches (full, queryA, sort, "JIHGFEDCBA");
+    assertSaneFieldCaches(getName() + " ByteParser");
+    fc.purgeAllCaches();
+
+    sort.setSort (new SortField ("parser", new FieldCache.ShortParser(){
+      public final short parseShort(final String val) {
+        return (short) (val.charAt(0)-'A');
+      }
+    }), SortField.FIELD_DOC );
+    assertMatches (full, queryA, sort, "JIHGFEDCBA");
+    assertSaneFieldCaches(getName() + " ShortParser");
+    fc.purgeAllCaches();
+  }
+
+  // test sorts when there's nothing in the index
+  public void testEmptyIndex() throws Exception {
+    Searcher empty = getEmptyIndex();
+
+    sort = new Sort();
+    assertMatches (empty, queryX, sort, "");
+
+    sort.setSort(SortField.FIELD_DOC);
+    assertMatches (empty, queryX, sort, "");
+
+    sort.setSort (new SortField ("int", SortField.INT), SortField.FIELD_DOC );
+    assertMatches (empty, queryX, sort, "");
+
+    sort.setSort (new SortField ("string", SortField.STRING, true), SortField.FIELD_DOC );
+    assertMatches (empty, queryX, sort, "");
+
+    sort.setSort (new SortField ("float", SortField.FLOAT), new SortField ("string", SortField.STRING) );
+    assertMatches (empty, queryX, sort, "");
+  }
+
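+  // Minimal custom comparator used by testNewCustomFieldParserSort: it sorts by an int
+  // derived from the first character of the "parser" field (see testIntParser below).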
+  static class MyFieldComparator extends FieldComparator<Integer> {
+    int[] docValues;
+    int[] slotValues;
+    int bottomValue;
+
+    MyFieldComparator(int numHits) {
+      slotValues = new int[numHits];
+    }
+
+    @Override
+    public void copy(int slot, int doc) {
+      slotValues[slot] = docValues[doc];
+    }
+
+    @Override
+    public int compare(int slot1, int slot2) {
+      // values are small enough that overflow won't happen
+      return slotValues[slot1] - slotValues[slot2];
+    }
+
+    @Override
+    public int compareBottom(int doc) {
+      return bottomValue - docValues[doc];
+    }
+
+    @Override
+    public void setBottom(int bottom) {
+      bottomValue = slotValues[bottom];
+    }
+
+    private static final FieldCache.IntParser testIntParser = new FieldCache.IntParser() {
+      public final int parseInt(final String val) {
+        return (val.charAt(0)-'A') * 123456;
+      }
+    };
+
+    @Override
+    public void setNextReader(IndexReader reader, int docBase) throws IOException {
+      docValues = FieldCache.DEFAULT.getInts(reader, "parser", testIntParser);
+    }
+
+    @Override
+    public Integer value(int slot) {
+      return Integer.valueOf(slotValues[slot]);
+    }
+  }
+
+  static class MyFieldComparatorSource extends FieldComparatorSource {
+    @Override
+    public FieldComparator newComparator(String fieldname, int numHits, int sortPos, boolean reversed) {
+      return new MyFieldComparator(numHits);
+    }
+  }
+
+  // Test sorting w/ custom FieldComparator
+  public void testNewCustomFieldParserSort() throws Exception {
+    sort.setSort (new SortField ("parser", new MyFieldComparatorSource()));
+    assertMatches (full, queryA, sort, "JIHGFEDCBA");
+  }
+
+  // test sorts in reverse
+  public void testReverseSort() throws Exception {
+    sort.setSort (new SortField (null, SortField.SCORE, true), SortField.FIELD_DOC );
+    assertMatches (full, queryX, sort, "IEGCA");
+    assertMatches (full, queryY, sort, "JFHDB");
+
+    sort.setSort (new SortField (null, SortField.DOC, true));
+    assertMatches (full, queryX, sort, "IGECA");
+    assertMatches (full, queryY, sort, "JHFDB");
+
+    sort.setSort (new SortField ("int", SortField.INT, true) );
+    assertMatches (full, queryX, sort, "CAEGI");
+    assertMatches (full, queryY, sort, "BJFHD");
+
+    sort.setSort (new SortField ("float", SortField.FLOAT, true) );
+    assertMatches (full, queryX, sort, "AECIG");
+    assertMatches (full, queryY, sort, "BFJHD");
+
+    sort.setSort (new SortField ("string", SortField.STRING, true) );
+    assertMatches (full, queryX, sort, "CEGIA");
+    assertMatches (full, queryY, sort, "BFHJD");
+  }
+
+  // test sorting when the sort field is empty (undefined) for some of the documents
+  public void testEmptyFieldSort() throws Exception {
+    sort.setSort (new SortField ("string", SortField.STRING) );
+    assertMatches (full, queryF, sort, "ZJI");
+
+    sort.setSort (new SortField ("string", SortField.STRING, true) );
+    assertMatches (full, queryF, sort, "IJZ");
+    
+    sort.setSort (new SortField ("i18n", Locale.ENGLISH));
+    assertMatches (full, queryF, sort, "ZJI");
+    
+    sort.setSort (new SortField ("i18n", Locale.ENGLISH, true));
+    assertMatches (full, queryF, sort, "IJZ");
+
+    sort.setSort (new SortField ("int", SortField.INT) );
+    assertMatches (full, queryF, sort, "IZJ");
+
+    sort.setSort (new SortField ("int", SortField.INT, true) );
+    assertMatches (full, queryF, sort, "JZI");
+
+    sort.setSort (new SortField ("float", SortField.FLOAT) );
+    assertMatches (full, queryF, sort, "ZJI");
+
+    // using a nonexistent field as the first sort key shouldn't make a difference:
+    sort.setSort (new SortField ("nosuchfield", SortField.STRING),
+        new SortField ("float", SortField.FLOAT) );
+    assertMatches (full, queryF, sort, "ZJI");
+
+    sort.setSort (new SortField ("float", SortField.FLOAT, true) );
+    assertMatches (full, queryF, sort, "IJZ");
+
+    // When a field is null for both documents, the next SortField should be used.
+    sort.setSort (new SortField ("int", SortField.INT),
+                                new SortField ("string", SortField.STRING),
+        new SortField ("float", SortField.FLOAT) );
+    assertMatches (full, queryG, sort, "ZWXY");
+
+    // Reverse the last criterion to make sure the test didn't pass by chance
+    sort.setSort (new SortField ("int", SortField.INT),
+                                new SortField ("string", SortField.STRING),
+        new SortField ("float", SortField.FLOAT, true) );
+    assertMatches (full, queryG, sort, "ZYXW");
+
+    // Do the same for a MultiSearcher
+    Searcher multiSearcher=new MultiSearcher (new Searchable[] { full });
+
+    sort.setSort (new SortField ("int", SortField.INT),
+                                new SortField ("string", SortField.STRING),
+        new SortField ("float", SortField.FLOAT) );
+    assertMatches (multiSearcher, queryG, sort, "ZWXY");
+
+    sort.setSort (new SortField ("int", SortField.INT),
+                                new SortField ("string", SortField.STRING),
+        new SortField ("float", SortField.FLOAT, true) );
+    assertMatches (multiSearcher, queryG, sort, "ZYXW");
+    // Don't close the multiSearcher. it would close the full searcher too!
+
+    // Do the same for a ParallelMultiSearcher
+    ExecutorService exec = Executors.newFixedThreadPool(_TestUtil.nextInt(random, 2, 8));
+    Searcher parallelSearcher=new ParallelMultiSearcher (exec, full);
+
+    sort.setSort (new SortField ("int", SortField.INT),
+                                new SortField ("string", SortField.STRING),
+        new SortField ("float", SortField.FLOAT) );
+    assertMatches (parallelSearcher, queryG, sort, "ZWXY");
+
+    sort.setSort (new SortField ("int", SortField.INT),
+                                new SortField ("string", SortField.STRING),
+        new SortField ("float", SortField.FLOAT, true) );
+    assertMatches (parallelSearcher, queryG, sort, "ZYXW");
+    parallelSearcher.close();
+    exec.awaitTermination(1000, TimeUnit.MILLISECONDS);
+  }
+
+  // test sorts using a series of fields
+  public void testSortCombos() throws Exception {
+    sort.setSort (new SortField ("int", SortField.INT), new SortField ("float", SortField.FLOAT) );
+    assertMatches (full, queryX, sort, "IGEAC");
+
+    sort.setSort (new SortField ("int", SortField.INT, true), new SortField (null, SortField.DOC, true) );
+    assertMatches (full, queryX, sort, "CEAGI");
+
+    sort.setSort (new SortField ("float", SortField.FLOAT), new SortField ("string", SortField.STRING) );
+    assertMatches (full, queryX, sort, "GICEA");
+  }
+
+  // test using a Locale for sorting strings
+  public void testLocaleSort() throws Exception {
+    sort.setSort (new SortField ("string", Locale.US) );
+    assertMatches (full, queryX, sort, "AIGEC");
+    assertMatches (full, queryY, sort, "DJHFB");
+
+    sort.setSort (new SortField ("string", Locale.US, true) );
+    assertMatches (full, queryX, sort, "CEGIA");
+    assertMatches (full, queryY, sort, "BFHJD");
+  }
+
+  // test using various international locales with accented characters
+  // (which sort differently depending on locale)
+  public void testInternationalSort() throws Exception {
+    sort.setSort (new SortField ("i18n", Locale.US));
+    assertMatches (full, queryY, sort, oStrokeFirst ? "BFJHD" : "BFJDH");
+
+    sort.setSort (new SortField ("i18n", new Locale("sv", "se")));
+    assertMatches (full, queryY, sort, "BJDFH");
+
+    sort.setSort (new SortField ("i18n", new Locale("da", "dk")));
+    assertMatches (full, queryY, sort, "BJDHF");
+
+    sort.setSort (new SortField ("i18n", Locale.US));
+    assertMatches (full, queryX, sort, "ECAGI");
+
+    sort.setSort (new SortField ("i18n", Locale.FRANCE));
+    assertMatches (full, queryX, sort, "EACGI");
+  }
+    
+  // Test the MultiSearcher's ability to preserve locale-sensitive ordering
+  // by wrapping it around a single searcher
+  public void testInternationalMultiSearcherSort() throws Exception {
+    Searcher multiSearcher = new MultiSearcher (new Searchable[] { full });
+    
+    sort.setSort (new SortField ("i18n", new Locale("sv", "se")));
+    assertMatches (multiSearcher, queryY, sort, "BJDFH");
+    
+    sort.setSort (new SortField ("i18n", Locale.US));
+    assertMatches (multiSearcher, queryY, sort, oStrokeFirst ? "BFJHD" : "BFJDH");
+    
+    sort.setSort (new SortField ("i18n", new Locale("da", "dk")));
+    assertMatches (multiSearcher, queryY, sort, "BJDHF");
+  } 
+
+  // test a variety of sorts using more than one searcher
+  public void testMultiSort() throws Exception {
+    MultiSearcher searcher = new MultiSearcher (new Searchable[] { searchX, searchY });
+    runMultiSorts(searcher, false);
+  }
+
+  // test a variety of sorts using a parallel multisearcher
+  public void testParallelMultiSort() throws Exception {
+    ExecutorService exec = Executors.newFixedThreadPool(_TestUtil.nextInt(random, 2, 8));
+    Searcher searcher = new ParallelMultiSearcher (exec, searchX, searchY);
+    runMultiSorts(searcher, false);
+    searcher.close();
+    exec.awaitTermination(1000, TimeUnit.MILLISECONDS);
+  }
+
+  // test that the relevancy scores are the same even if
+  // hits are sorted
+  public void testNormalizedScores() throws Exception {
+
+    // capture relevancy scores
+    HashMap<String,Float> scoresX = getScores (full.search (queryX, null, 1000).scoreDocs, full);
+    HashMap<String,Float> scoresY = getScores (full.search (queryY, null, 1000).scoreDocs, full);
+    HashMap<String,Float> scoresA = getScores (full.search (queryA, null, 1000).scoreDocs, full);
+
+    // we'll test searching locally and via a MultiSearcher
+    
+    MultiSearcher multi  = new MultiSearcher (new Searchable[] { searchX, searchY });
+
+    // change sorting and make sure relevancy stays the same
+
+    sort = new Sort();
+    assertSameValues (scoresX, getScores (full.search (queryX, null, 1000, sort).scoreDocs, full));
+    assertSameValues (scoresX, getScores (multi.search (queryX, null, 1000, sort).scoreDocs, multi));
+    assertSameValues (scoresY, getScores (full.search (queryY, null, 1000, sort).scoreDocs, full));
+    assertSameValues (scoresY, getScores (multi.search (queryY, null, 1000, sort).scoreDocs, multi));
+    assertSameValues (scoresA, getScores (full.search (queryA, null, 1000, sort).scoreDocs, full));
+    assertSameValues (scoresA, getScores (multi.search (queryA, null, 1000, sort).scoreDocs, multi));
+
+    sort.setSort(SortField.FIELD_DOC);
+    assertSameValues (scoresX, getScores (full.search (queryX, null, 1000, sort).scoreDocs, full));
+    assertSameValues (scoresX, getScores (multi.search (queryX, null, 1000, sort).scoreDocs, multi));
+    assertSameValues (scoresY, getScores (full.search (queryY, null, 1000, sort).scoreDocs, full));
+    assertSameValues (scoresY, getScores (multi.search (queryY, null, 1000, sort).scoreDocs, multi));
+    assertSameValues (scoresA, getScores (full.search (queryA, null, 1000, sort).scoreDocs, full));
+    assertSameValues (scoresA, getScores (multi.search (queryA, null, 1000, sort).scoreDocs, multi));
+
+    sort.setSort (new SortField("int", SortField.INT));
+    assertSameValues (scoresX, getScores (full.search (queryX, null, 1000, sort).scoreDocs, full));
+    assertSameValues (scoresX, getScores (multi.search (queryX, null, 1000, sort).scoreDocs, multi));
+    assertSameValues (scoresY, getScores (full.search (queryY, null, 1000, sort).scoreDocs, full));
+    assertSameValues (scoresY, getScores (multi.search (queryY, null, 1000, sort).scoreDocs, multi));
+    assertSameValues (scoresA, getScores (full.search (queryA, null, 1000, sort).scoreDocs, full));
+    assertSameValues (scoresA, getScores (multi.search (queryA, null, 1000, sort).scoreDocs, multi));
+
+    sort.setSort (new SortField("float", SortField.FLOAT));
+    assertSameValues (scoresX, getScores (full.search (queryX, null, 1000, sort).scoreDocs, full));
+    assertSameValues (scoresX, getScores (multi.search (queryX, null, 1000, sort).scoreDocs, multi));
+    assertSameValues (scoresY, getScores (full.search (queryY, null, 1000, sort).scoreDocs, full));
+    assertSameValues (scoresY, getScores (multi.search (queryY, null, 1000, sort).scoreDocs, multi));
+    assertSameValues (scoresA, getScores (full.search (queryA, null, 1000, sort).scoreDocs, full));
+    assertSameValues (scoresA, getScores (multi.search (queryA, null, 1000, sort).scoreDocs, multi));
+
+    sort.setSort (new SortField("string", SortField.STRING));
+    assertSameValues (scoresX, getScores (full.search (queryX, null, 1000, sort).scoreDocs, full));
+    assertSameValues (scoresX, getScores (multi.search (queryX, null, 1000, sort).scoreDocs, multi));
+    assertSameValues (scoresY, getScores (full.search (queryY, null, 1000, sort).scoreDocs, full));
+    assertSameValues (scoresY, getScores (multi.search (queryY, null, 1000, sort).scoreDocs, multi));
+    assertSameValues (scoresA, getScores (full.search (queryA, null, 1000, sort).scoreDocs, full));
+    assertSameValues (scoresA, getScores (multi.search (queryA, null, 1000, sort).scoreDocs, multi));
+
+    sort.setSort (new SortField("int", SortField.INT),new SortField("float", SortField.FLOAT));
+    assertSameValues (scoresX, getScores (full.search (queryX, null, 1000, sort).scoreDocs, full));
+    assertSameValues (scoresX, getScores (multi.search (queryX, null, 1000, sort).scoreDocs, multi));
+    assertSameValues (scoresY, getScores (full.search (queryY, null, 1000, sort).scoreDocs, full));
+    assertSameValues (scoresY, getScores (multi.search (queryY, null, 1000, sort).scoreDocs, multi));
+    assertSameValues (scoresA, getScores (full.search (queryA, null, 1000, sort).scoreDocs, full));
+    assertSameValues (scoresA, getScores (multi.search (queryA, null, 1000, sort).scoreDocs, multi));
+
+    sort.setSort (new SortField ("int", SortField.INT, true), new SortField (null, SortField.DOC, true) );
+    assertSameValues (scoresX, getScores (full.search (queryX, null, 1000, sort).scoreDocs, full));
+    assertSameValues (scoresX, getScores (multi.search (queryX, null, 1000, sort).scoreDocs, multi));
+    assertSameValues (scoresY, getScores (full.search (queryY, null, 1000, sort).scoreDocs, full));
+    assertSameValues (scoresY, getScores (multi.search (queryY, null, 1000, sort).scoreDocs, multi));
+    assertSameValues (scoresA, getScores (full.search (queryA, null, 1000, sort).scoreDocs, full));
+    assertSameValues (scoresA, getScores (multi.search (queryA, null, 1000, sort).scoreDocs, multi));
+
+    sort.setSort (new SortField("int", SortField.INT),new SortField("string", SortField.STRING));
+    assertSameValues (scoresX, getScores (full.search (queryX, null, 1000, sort).scoreDocs, full));
+    assertSameValues (scoresX, getScores (multi.search (queryX, null, 1000, sort).scoreDocs, multi));
+    assertSameValues (scoresY, getScores (full.search (queryY, null, 1000, sort).scoreDocs, full));
+    assertSameValues (scoresY, getScores (multi.search (queryY, null, 1000, sort).scoreDocs, multi));
+    assertSameValues (scoresA, getScores (full.search (queryA, null, 1000, sort).scoreDocs, full));
+    assertSameValues (scoresA, getScores (multi.search (queryA, null, 1000, sort).scoreDocs, multi));
+
+  }
+
+  public void testTopDocsScores() throws Exception {
+
+    // There was previously a bug in FieldSortedHitQueue.maxscore when only a single
+    // doc was added.  That is what this test checks for.
+    Sort sort = new Sort();
+    int nDocs=10;
+
+    // try to pick a query that will result in an unnormalized
+    // score greater than 1 to test for correct normalization
+    final TopDocs docs1 = full.search(queryE,null,nDocs,sort);
+
+    // a filter that only allows through the first hit
+    Filter filt = new Filter() {
+      @Override
+      public DocIdSet getDocIdSet(IndexReader reader) throws IOException {
+        BitSet bs = new BitSet(reader.maxDoc());
+        bs.set(0, reader.maxDoc());
+        bs.set(docs1.scoreDocs[0].doc);
+        return new DocIdBitSet(bs);
+      }
+    };
+
+    TopDocs docs2 = full.search(queryE, filt, nDocs, sort);
+    
+    assertEquals(docs1.scoreDocs[0].score, docs2.scoreDocs[0].score, 1e-6);
+  }
+  
+  public void testSortWithoutFillFields() throws Exception {
+    
+    // There was previously a bug in TopFieldCollector when fillFields was set
+    // to false - the same doc and score were set in the ScoreDoc[] array. This test
+    // asserts that if fillFields is false, the documents are set properly. It
+    // does not use Searcher's default search methods (with Sort) since they all set
+    // fillFields to true.
+    Sort[] sort = new Sort[] { new Sort(SortField.FIELD_DOC), new Sort() };
+    for (int i = 0; i < sort.length; i++) {
+      Query q = new MatchAllDocsQuery();
+      TopDocsCollector<Entry> tdc = TopFieldCollector.create(sort[i], 10, false,
+          false, false, true);
+      
+      full.search(q, tdc);
+      
+      ScoreDoc[] sd = tdc.topDocs().scoreDocs;
+      for (int j = 1; j < sd.length; j++) {
+        assertTrue(sd[j].doc != sd[j - 1].doc);
+      }
+      
+    }
+  }
+
+  public void testSortWithoutScoreTracking() throws Exception {
+
+    // Two Sort criteria to instantiate the multi/single comparators.
+    Sort[] sort = new Sort[] {new Sort(SortField.FIELD_DOC), new Sort() };
+    for (int i = 0; i < sort.length; i++) {
+      Query q = new MatchAllDocsQuery();
+      TopDocsCollector<Entry> tdc = TopFieldCollector.create(sort[i], 10, true, false,
+          false, true);
+      
+      full.search(q, tdc);
+      
+      TopDocs td = tdc.topDocs();
+      ScoreDoc[] sd = td.scoreDocs;
+      for (int j = 0; j < sd.length; j++) {
+        assertTrue(Float.isNaN(sd[j].score));
+      }
+      assertTrue(Float.isNaN(td.getMaxScore()));
+    }
+  }
+  
+  public void testSortWithScoreNoMaxScoreTracking() throws Exception {
+    
+    // Two Sort criteria to instantiate the multi/single comparators.
+    Sort[] sort = new Sort[] {new Sort(SortField.FIELD_DOC), new Sort() };
+    for (int i = 0; i < sort.length; i++) {
+      Query q = new MatchAllDocsQuery();
+      TopDocsCollector<Entry> tdc = TopFieldCollector.create(sort[i], 10, true, true,
+          false, true);
+      
+      full.search(q, tdc);
+      
+      TopDocs td = tdc.topDocs();
+      ScoreDoc[] sd = td.scoreDocs;
+      for (int j = 0; j < sd.length; j++) {
+        assertTrue(!Float.isNaN(sd[j].score));
+      }
+      assertTrue(Float.isNaN(td.getMaxScore()));
+    }
+  }
+  
+  // MultiComparatorScoringNoMaxScoreCollector
+  public void testSortWithScoreNoMaxScoreTrackingMulti() throws Exception {
+    
+    // Two Sort criteria to instantiate the multi/single comparators.
+    Sort[] sort = new Sort[] {new Sort(SortField.FIELD_DOC, SortField.FIELD_SCORE) };
+    for (int i = 0; i < sort.length; i++) {
+      Query q = new MatchAllDocsQuery();
+      TopDocsCollector<Entry> tdc = TopFieldCollector.create(sort[i], 10, true, true,
+          false, true);
+
+      full.search(q, tdc);
+      
+      TopDocs td = tdc.topDocs();
+      ScoreDoc[] sd = td.scoreDocs;
+      for (int j = 0; j < sd.length; j++) {
+        assertTrue(!Float.isNaN(sd[j].score));
+      }
+      assertTrue(Float.isNaN(td.getMaxScore()));
+    }
+  }
+  
+  public void testSortWithScoreAndMaxScoreTracking() throws Exception {
+    
+    // Two Sort criteria to instantiate the multi/single comparators.
+    Sort[] sort = new Sort[] {new Sort(SortField.FIELD_DOC), new Sort() };
+    for (int i = 0; i < sort.length; i++) {
+      Query q = new MatchAllDocsQuery();
+      TopDocsCollector<Entry> tdc = TopFieldCollector.create(sort[i], 10, true, true,
+          true, true);
+      
+      full.search(q, tdc);
+      
+      TopDocs td = tdc.topDocs();
+      ScoreDoc[] sd = td.scoreDocs;
+      for (int j = 0; j < sd.length; j++) {
+        assertTrue(!Float.isNaN(sd[j].score));
+      }
+      assertTrue(!Float.isNaN(td.getMaxScore()));
+    }
+  }
+  
+  public void testOutOfOrderDocsScoringSort() throws Exception {
+
+    // Two Sort criteria to instantiate the multi/single comparators.
+    Sort[] sort = new Sort[] {new Sort(SortField.FIELD_DOC), new Sort() };
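+    // each tfcOptions entry holds the {fillFields, trackDocScores, trackMaxScore} flags
+    // passed to TopFieldCollector.create below (docsScoredInOrder is always false here)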
+    boolean[][] tfcOptions = new boolean[][] {
+        new boolean[] { false, false, false },
+        new boolean[] { false, false, true },
+        new boolean[] { false, true, false },
+        new boolean[] { false, true, true },
+        new boolean[] { true, false, false },
+        new boolean[] { true, false, true },
+        new boolean[] { true, true, false },
+        new boolean[] { true, true, true },
+    };
+    String[] actualTFCClasses = new String[] {
+        "OutOfOrderOneComparatorNonScoringCollector", 
+        "OutOfOrderOneComparatorScoringMaxScoreCollector", 
+        "OutOfOrderOneComparatorScoringNoMaxScoreCollector", 
+        "OutOfOrderOneComparatorScoringMaxScoreCollector", 
+        "OutOfOrderOneComparatorNonScoringCollector", 
+        "OutOfOrderOneComparatorScoringMaxScoreCollector", 
+        "OutOfOrderOneComparatorScoringNoMaxScoreCollector", 
+        "OutOfOrderOneComparatorScoringMaxScoreCollector" 
+    };
+    
+    BooleanQuery bq = new BooleanQuery();
+    // Add a Query with SHOULD, since BooleanWeight.scorer() returns BooleanScorer2,
+    // which delegates to BooleanScorer if there are no mandatory clauses.
+    bq.add(new MatchAllDocsQuery(), Occur.SHOULD);
+    // Set minNrShouldMatch to 1 so that BQ will not optimize rewrite to return
+    // the clause instead of BQ.
+    bq.setMinimumNumberShouldMatch(1);
+    for (int i = 0; i < sort.length; i++) {
+      for (int j = 0; j < tfcOptions.length; j++) {
+        TopDocsCollector<Entry> tdc = TopFieldCollector.create(sort[i], 10,
+            tfcOptions[j][0], tfcOptions[j][1], tfcOptions[j][2], false);
+
+        assertTrue(tdc.getClass().getName().endsWith("$"+actualTFCClasses[j]));
+        
+        full.search(bq, tdc);
+        
+        TopDocs td = tdc.topDocs();
+        ScoreDoc[] sd = td.scoreDocs;
+        assertEquals(10, sd.length);
+      }
+    }
+  }
+  
+  // OutOfOrderMulti*Collector
+  public void testOutOfOrderDocsScoringSortMulti() throws Exception {
+
+    // Two Sort criteria to instantiate the multi/single comparators.
+    Sort[] sort = new Sort[] {new Sort(SortField.FIELD_DOC, SortField.FIELD_SCORE) };
+    boolean[][] tfcOptions = new boolean[][] {
+        new boolean[] { false, false, false },
+        new boolean[] { false, false, true },
+        new boolean[] { false, true, false },
+        new boolean[] { false, true, true },
+        new boolean[] { true, false, false },
+        new boolean[] { true, false, true },
+        new boolean[] { true, true, false },
+        new boolean[] { true, true, true },
+    };
+    String[] actualTFCClasses = new String[] {
+        "OutOfOrderMultiComparatorNonScoringCollector", 
+        "OutOfOrderMultiComparatorScoringMaxScoreCollector", 
+        "OutOfOrderMultiComparatorScoringNoMaxScoreCollector", 
+        "OutOfOrderMultiComparatorScoringMaxScoreCollector", 
+        "OutOfOrderMultiComparatorNonScoringCollector", 
+        "OutOfOrderMultiComparatorScoringMaxScoreCollector", 
+        "OutOfOrderMultiComparatorScoringNoMaxScoreCollector", 
+        "OutOfOrderMultiComparatorScoringMaxScoreCollector" 
+    };
+    
+    BooleanQuery bq = new BooleanQuery();
+    // Add a Query with SHOULD, since BooleanWeight.scorer() returns BooleanScorer2,
+    // which delegates to BooleanScorer if there are no mandatory clauses.
+    bq.add(new MatchAllDocsQuery(), Occur.SHOULD);
+    // Set minNrShouldMatch to 1 so that BQ will not optimize rewrite to return
+    // the clause instead of BQ.
+    bq.setMinimumNumberShouldMatch(1);
+    for (int i = 0; i < sort.length; i++) {
+      for (int j = 0; j < tfcOptions.length; j++) {
+        TopDocsCollector<Entry> tdc = TopFieldCollector.create(sort[i], 10,
+            tfcOptions[j][0], tfcOptions[j][1], tfcOptions[j][2], false);
+
+        assertTrue(tdc.getClass().getName().endsWith("$"+actualTFCClasses[j]));
+        
+        full.search(bq, tdc);
+        
+        TopDocs td = tdc.topDocs();
+        ScoreDoc[] sd = td.scoreDocs;
+        assertEquals(10, sd.length);
+      }
+    }
+  }
+  
+  public void testSortWithScoreAndMaxScoreTrackingNoResults() throws Exception {
+    
+    // Two Sort criteria to instantiate the multi/single comparators.
+    Sort[] sort = new Sort[] {new Sort(SortField.FIELD_DOC), new Sort() };
+    for (int i = 0; i < sort.length; i++) {
+      TopDocsCollector<Entry> tdc = TopFieldCollector.create(sort[i], 10, true, true, true, true);
+      TopDocs td = tdc.topDocs();
+      assertEquals(0, td.totalHits);
+      assertTrue(Float.isNaN(td.getMaxScore()));
+    }
+  }
+  
+  // runs a variety of sorts useful for multisearchers
+  private void runMultiSorts(Searcher multi, boolean isFull) throws Exception {
+    sort.setSort(SortField.FIELD_DOC);
+    String expected = isFull ? "ABCDEFGHIJ" : "ACEGIBDFHJ";
+    assertMatches(multi, queryA, sort, expected);
+
+    sort.setSort(new SortField ("int", SortField.INT));
+    expected = isFull ? "IDHFGJABEC" : "IDHFGJAEBC";
+    assertMatches(multi, queryA, sort, expected);
+
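+    // the same int sort is repeated below, with and without an explicit tie-break,
+    // which should reuse the FieldCache entries (see assertSaneFieldCaches further down)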
+    sort.setSort(new SortField ("int", SortField.INT), SortField.FIELD_DOC);
+    expected = isFull ? "IDHFGJABEC" : "IDHFGJAEBC";
+    assertMatches(multi, queryA, sort, expected);
+
+    sort.setSort(new SortField("int", SortField.INT));
+    expected = isFull ? "IDHFGJABEC" : "IDHFGJAEBC";
+    assertMatches(multi, queryA, sort, expected);
+
+    sort.setSort(new SortField ("float", SortField.FLOAT), SortField.FIELD_DOC);
+    assertMatches(multi, queryA, sort, "GDHJCIEFAB");
+
+    sort.setSort(new SortField("float", SortField.FLOAT));
+    assertMatches(multi, queryA, sort, "GDHJCIEFAB");
+
+    sort.setSort(new SortField("string", SortField.STRING));
+    assertMatches(multi, queryA, sort, "DJAIHGFEBC");
+
+    sort.setSort(new SortField("int", SortField.INT, true));
+    expected = isFull ? "CABEJGFHDI" : "CAEBJGFHDI";
+    assertMatches(multi, queryA, sort, expected);
+
+    sort.setSort(new SortField("float", SortField.FLOAT, true));
+    assertMatches(multi, queryA, sort, "BAFECIJHDG");
+
+    sort.setSort(new SortField("string", SortField.STRING, true));
+    assertMatches(multi, queryA, sort, "CBEFGHIAJD");
+
+    sort.setSort(new SortField("int", SortField.INT),new SortField("float", SortField.FLOAT));
+    assertMatches(multi, queryA, sort, "IDHFGJEABC");
+
+    sort.setSort(new SortField("float", SortField.FLOAT),new SortField("string", SortField.STRING));
+    assertMatches(multi, queryA, sort, "GDHJICEFAB");
+
+    sort.setSort(new SortField ("int", SortField.INT));
+    assertMatches(multi, queryF, sort, "IZJ");
+
+    sort.setSort(new SortField ("int", SortField.INT, true));
+    assertMatches(multi, queryF, sort, "JZI");
+
+    sort.setSort(new SortField ("float", SortField.FLOAT));
+    assertMatches(multi, queryF, sort, "ZJI");
+
+    sort.setSort(new SortField ("string", SortField.STRING));
+    assertMatches(multi, queryF, sort, "ZJI");
+
+    sort.setSort(new SortField ("string", SortField.STRING, true));
+    assertMatches(multi, queryF, sort, "IJZ");
+
+    // up to this point, all of the searches should have "sane"
+    // FieldCache behavior, and should have reused the cache in several cases
+    assertSaneFieldCaches(getName() + " various");
+    // next we'll check Locale based (String[]) for 'string', so purge first
+    FieldCache.DEFAULT.purgeAllCaches();
+
+    sort.setSort(new SortField ("string", Locale.US) );
+    assertMatches(multi, queryA, sort, "DJAIHGFEBC");
+
+    sort.setSort(new SortField ("string", Locale.US, true) );
+    assertMatches(multi, queryA, sort, "CBEFGHIAJD");
+
+    sort.setSort(new SortField ("string", Locale.UK) );
+    assertMatches(multi, queryA, sort, "DJAIHGFEBC");
+
+    assertSaneFieldCaches(getName() + " Locale.US + Locale.UK");
+    FieldCache.DEFAULT.purgeAllCaches();
+
+  }
+
+  private void assertMatches(Searcher searcher, Query query, Sort sort, String expectedResult) throws IOException {
+    assertMatches( null, searcher, query, sort, expectedResult );
+  }
+
+  // make sure the documents returned by the search match the expected list
+  private void assertMatches(String msg, Searcher searcher, Query query, Sort sort,
+      String expectedResult) throws IOException {
+    //ScoreDoc[] result = searcher.search (query, null, 1000, sort).scoreDocs;
+    TopDocs hits = searcher.search (query, null, Math.max(1, expectedResult.length()), sort);
+    ScoreDoc[] result = hits.scoreDocs;
+    assertEquals(expectedResult.length(),hits.totalHits);
+    StringBuilder buff = new StringBuilder(10);
+    int n = result.length;
+    for (int i=0; i<n; ++i) {
+      Document doc = searcher.doc(result[i].doc);
+      String[] v = doc.getValues("tracer");
+      for (int j=0; j<v.length; ++j) {
+        buff.append (v[j]);
+      }
+    }
+    assertEquals (msg, expectedResult, buff.toString());
+  }
+
+  private HashMap<String,Float> getScores (ScoreDoc[] hits, Searcher searcher)
+  throws IOException {
+    HashMap<String,Float> scoreMap = new HashMap<String,Float>();
+    int n = hits.length;
+    for (int i=0; i<n; ++i) {
+      Document doc = searcher.doc(hits[i].doc);
+      String[] v = doc.getValues("tracer");
+      assertEquals (v.length, 1);
+      scoreMap.put (v[0], Float.valueOf(hits[i].score));
+    }
+    return scoreMap;
+  }
+
+  // make sure all the values in the maps match
+  private <K, V> void assertSameValues (HashMap<K,V> m1, HashMap<K,V> m2) {
+    int n = m1.size();
+    int m = m2.size();
+    assertEquals (n, m);
+    Iterator<K> iter = m1.keySet().iterator();
+    while (iter.hasNext()) {
+      K key = iter.next();
+      V o1 = m1.get(key);
+      V o2 = m2.get(key);
+      if (o1 instanceof Float) {
+        assertEquals(((Float)o1).floatValue(), ((Float)o2).floatValue(), 1e-6);
+      } else {
+        assertEquals (m1.get(key), m2.get(key));
+      }
+    }
+  }
+
+  public void testEmptyStringVsNullStringSort() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(
+                        TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    Document doc = new Document();
+    doc.add(newField("f", "", Field.Store.NO, Field.Index.NOT_ANALYZED));
+    doc.add(newField("t", "1", Field.Store.NO, Field.Index.NOT_ANALYZED));
+    w.addDocument(doc);
+    w.commit();
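+    // the second document has no "f" field at all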
+    doc = new Document();
+    doc.add(newField("t", "1", Field.Store.NO, Field.Index.NOT_ANALYZED));
+    w.addDocument(doc);
+
+    IndexReader r = IndexReader.open(w, true);
+    w.close();
+    IndexSearcher s = newSearcher(r);
+    TopDocs hits = s.search(new TermQuery(new Term("t", "1")), null, 10, new Sort(new SortField("f", SortField.STRING)));
+    assertEquals(2, hits.totalHits);
+    // null sorts first
+    assertEquals(1, hits.scoreDocs[0].doc);
+    assertEquals(0, hits.scoreDocs[1].doc);
+    s.close();
+    r.close();
+    dir.close();
+  }
+
+  public void testLUCENE2142() throws IOException {
+    Directory indexStore = newDirectory();
+    IndexWriter writer = new IndexWriter(indexStore, newIndexWriterConfig(
+        TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    for (int i=0; i<5; i++) {
+        Document doc = new Document();
+        doc.add (new Field ("string", "a"+i, Field.Store.NO, Field.Index.NOT_ANALYZED));
+        doc.add (new Field ("string", "b"+i, Field.Store.NO, Field.Index.NOT_ANALYZED));
+        writer.addDocument (doc);
+    }
+    writer.optimize(); // force a single segment so it has a higher unique term count in all cases
+    writer.close();
+    sort.setSort(
+        new SortField("string", SortField.STRING),
+        SortField.FIELD_DOC );
+    // this should not throw an ArrayIndexOutOfBoundsException or a RuntimeException
+    IndexSearcher searcher = new IndexSearcher(indexStore, true);
+    searcher.search(new MatchAllDocsQuery(), null, 500, sort);
+    searcher.close();
+    indexStore.close();
+  }
+
+  public void testCountingCollector() throws Exception {
+    Directory indexStore = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random, indexStore);
+    for (int i=0; i<5; i++) {
+      Document doc = new Document();
+      doc.add (new Field ("string", "a"+i, Field.Store.NO, Field.Index.NOT_ANALYZED));
+      doc.add (new Field ("string", "b"+i, Field.Store.NO, Field.Index.NOT_ANALYZED));
+      writer.addDocument (doc);
+    }
+    IndexReader reader = writer.getReader();
+    writer.close();
+
+    IndexSearcher searcher = newSearcher(reader);
+    TotalHitCountCollector c = new TotalHitCountCollector();
+    searcher.search(new MatchAllDocsQuery(), null, c);
+    assertEquals(5, c.getTotalHits());
+    searcher.close();
+    reader.close();
+    indexStore.close();
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/TestSpanQueryFilter.java b/lucene/backwards/src/test/org/apache/lucene/search/TestSpanQueryFilter.java
new file mode 100644
index 0000000..f555dc2
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/TestSpanQueryFilter.java
@@ -0,0 +1,81 @@
+package org.apache.lucene.search;
+
+/**
+ * Copyright 2004 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.List;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.spans.SpanTermQuery;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.English;
+import org.apache.lucene.util.LuceneTestCase;
+
+public class TestSpanQueryFilter extends LuceneTestCase {
+
+  public void testFilterWorks() throws Exception {
+    Directory dir = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
+    for (int i = 0; i < 500; i++) {
+      Document document = new Document();
+      document.add(newField("field", English.intToEnglish(i) + " equals " + English.intToEnglish(i),
+              Field.Store.NO, Field.Index.ANALYZED));
+      writer.addDocument(document);
+    }
+    IndexReader reader = writer.getReader();
+    writer.close();
+
+    SpanTermQuery query = new SpanTermQuery(new Term("field", English.intToEnglish(10).trim()));
+    SpanQueryFilter filter = new SpanQueryFilter(query);
+    SpanFilterResult result = filter.bitSpans(reader);
+    DocIdSet docIdSet = result.getDocIdSet();
+    assertTrue("docIdSet is null and it shouldn't be", docIdSet != null);
+    assertContainsDocId("docIdSet doesn't contain docId 10", docIdSet, 10);
+    List<SpanFilterResult.PositionInfo> spans = result.getPositions();
+    assertTrue("spans is null and it shouldn't be", spans != null);
+    int size = getDocIdSetSize(docIdSet);
+    assertTrue("spans Size: " + spans.size() + " is not: " + size, spans.size() == size);
+    for (final SpanFilterResult.PositionInfo info: spans) {
+      assertTrue("info is null and it shouldn't be", info != null);
+      // The doc should indicate the bit is on
+      assertContainsDocId("docIdSet doesn't contain docId " + info.getDoc(), docIdSet, info.getDoc());
+      // There should be two positions in each
+      assertTrue("info.getPositions() Size: " + info.getPositions().size() + " is not: " + 2, info.getPositions().size() == 2);
+    }
+    reader.close();
+    dir.close();
+  }
+  
+  int getDocIdSetSize(DocIdSet docIdSet) throws Exception {
+    int size = 0;
+    DocIdSetIterator it = docIdSet.iterator();
+    while (it.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
+      size++;
+    }
+    return size;
+  }
+  
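+  // asserts that the DocIdSet contains docId by advancing its iterator and checking
+  // that it lands exactly on that doc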
+  public void assertContainsDocId(String msg, DocIdSet docIdSet, int docId) throws Exception {
+    DocIdSetIterator it = docIdSet.iterator();
+    assertTrue(msg, it.advance(docId) != DocIdSetIterator.NO_MORE_DOCS);
+    assertTrue(msg, it.docID() == docId);
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/TestSubScorerFreqs.java b/lucene/backwards/src/test/org/apache/lucene/search/TestSubScorerFreqs.java
new file mode 100644
index 0000000..46d7ed9
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/TestSubScorerFreqs.java
@@ -0,0 +1,226 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import java.io.*;
+import java.util.*;
+import org.apache.lucene.document.*;
+import org.apache.lucene.index.*;
+import org.apache.lucene.search.BooleanClause.Occur;
+import org.apache.lucene.search.Scorer.ScorerVisitor;
+import org.apache.lucene.store.*;
+import org.apache.lucene.util.*;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+public class TestSubScorerFreqs extends LuceneTestCase {
+
+  private static Directory dir;
+  private static IndexSearcher s;
+
+  @BeforeClass
+  public static void makeIndex() throws Exception {
+    dir = new RAMDirectory();
+    RandomIndexWriter w = new RandomIndexWriter(
+                                                random, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
+    // make sure we occasionally have more than one segment
+    int num = atLeast(31);
+    for (int i = 0; i < num; i++) {
+      Document doc = new Document();
+      doc.add(newField("f", "a b c d b c d c d d", Field.Store.NO,
+          Field.Index.ANALYZED));
+      w.addDocument(doc);
+
+      doc = new Document();
+      doc.add(newField("f", "a b c d", Field.Store.NO, Field.Index.ANALYZED));
+      w.addDocument(doc);
+    }
+
+    s = newSearcher(w.getReader());
+    w.close();
+  }
+
+  @AfterClass
+  public static void finish() throws Exception {
+    s.getIndexReader().close();
+    s.close();
+    s = null;
+    dir.close();
+    dir = null;
+  }
+
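+  // Wraps another Collector and, for every collected doc, records the freq() reported
+  // by each tracked sub-scorer (discovered via Scorer.visitScorers), keyed by its query.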
+  private static class CountingCollector extends Collector {
+    private final Collector other;
+    private int docBase;
+
+    public final Map<Integer, Map<Query, Float>> docCounts = new HashMap<Integer, Map<Query, Float>>();
+
+    private final Map<Query, Scorer> subScorers = new HashMap<Query, Scorer>();
+    private final ScorerVisitor<Query, Query, Scorer> visitor = new MockScorerVisitor();
+    private final EnumSet<Occur> collect;
+
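+    // records each visited sub-scorer in subScorers, but only for the Occur
+    // types this collector was configured to track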
+    private class MockScorerVisitor extends ScorerVisitor<Query, Query, Scorer> {
+
+      @Override
+      public void visitOptional(Query parent, Query child, Scorer scorer) {
+        if (collect.contains(Occur.SHOULD))
+          subScorers.put(child, scorer);
+      }
+
+      @Override
+      public void visitProhibited(Query parent, Query child, Scorer scorer) {
+        if (collect.contains(Occur.MUST_NOT))
+          subScorers.put(child, scorer);
+      }
+
+      @Override
+      public void visitRequired(Query parent, Query child, Scorer scorer) {
+        if (collect.contains(Occur.MUST))
+          subScorers.put(child, scorer);
+      }
+
+    }
+
+    public CountingCollector(Collector other) {
+      this(other, EnumSet.allOf(Occur.class));
+    }
+
+    public CountingCollector(Collector other, EnumSet<Occur> collect) {
+      this.other = other;
+      this.collect = collect;
+    }
+
+    @Override
+    public void setScorer(Scorer scorer) throws IOException {
+      other.setScorer(scorer);
+      scorer.visitScorers(visitor);
+    }
+
+    @Override
+    public void collect(int doc) throws IOException {
+      final Map<Query, Float> freqs = new HashMap<Query, Float>();
+      for (Map.Entry<Query, Scorer> ent : subScorers.entrySet()) {
+        Scorer value = ent.getValue();
+        int matchId = value.docID();
+        freqs.put(ent.getKey(), matchId == doc ? value.freq() : 0.0f);
+      }
+      docCounts.put(doc + docBase, freqs);
+      other.collect(doc);
+    }
+
+    @Override
+    public void setNextReader(IndexReader reader, int docBase)
+        throws IOException {
+      this.docBase = docBase;
+      other.setNextReader(reader, docBase);
+    }
+
+    @Override
+    public boolean acceptsDocsOutOfOrder() {
+      return other.acceptsDocsOutOfOrder();
+    }
+  }
+
+  private static final float FLOAT_TOLERANCE = 0.00001F;
+
+  @Test
+  public void testTermQuery() throws Exception {
+    TermQuery q = new TermQuery(new Term("f", "d"));
+    CountingCollector c = new CountingCollector(TopScoreDocCollector.create(10,
+        true));
+    s.search(q, null, c);
+    final int maxDocs = s.maxDoc();
+    assertEquals(maxDocs, c.docCounts.size());
+    for (int i = 0; i < maxDocs; i++) {
+      Map<Query, Float> doc0 = c.docCounts.get(i);
+      assertEquals(1, doc0.size());
+      assertEquals(4.0F, doc0.get(q), FLOAT_TOLERANCE);
+
+      Map<Query, Float> doc1 = c.docCounts.get(++i);
+      assertEquals(1, doc1.size());
+      assertEquals(1.0F, doc1.get(q), FLOAT_TOLERANCE);
+    }
+  }
+
+  @SuppressWarnings("unchecked")
+  @Test
+  public void testBooleanQuery() throws Exception {
+    TermQuery aQuery = new TermQuery(new Term("f", "a"));
+    TermQuery dQuery = new TermQuery(new Term("f", "d"));
+    TermQuery cQuery = new TermQuery(new Term("f", "c"));
+    TermQuery yQuery = new TermQuery(new Term("f", "y"));
+
+    BooleanQuery query = new BooleanQuery();
+    BooleanQuery inner = new BooleanQuery();
+
+    inner.add(cQuery, Occur.SHOULD);
+    inner.add(yQuery, Occur.MUST_NOT);
+    query.add(inner, Occur.MUST);
+    query.add(aQuery, Occur.MUST);
+    query.add(dQuery, Occur.MUST);
+    EnumSet<Occur>[] occurList = new EnumSet[] {EnumSet.of(Occur.MUST), EnumSet.of(Occur.MUST, Occur.SHOULD)};
+    for (EnumSet<Occur> occur : occurList) {
+      CountingCollector c = new CountingCollector(TopScoreDocCollector.create(
+          10, true), occur);
+      s.search(query, null, c);
+      final int maxDocs = s.maxDoc();
+      assertEquals(maxDocs, c.docCounts.size());
+      boolean includeOptional = occur.contains(Occur.SHOULD);
+      for (int i = 0; i < maxDocs; i++) {
+        Map<Query, Float> doc0 = c.docCounts.get(i);
+        assertEquals(includeOptional ? 5 : 4, doc0.size());
+        assertEquals(1.0F, doc0.get(aQuery), FLOAT_TOLERANCE);
+        assertEquals(4.0F, doc0.get(dQuery), FLOAT_TOLERANCE);
+        if (includeOptional)
+          assertEquals(3.0F, doc0.get(cQuery), FLOAT_TOLERANCE);
+
+        Map<Query, Float> doc1 = c.docCounts.get(++i);
+        assertEquals(includeOptional ? 5 : 4, doc1.size());
+        assertEquals(1.0F, doc1.get(aQuery), FLOAT_TOLERANCE);
+        assertEquals(1.0F, doc1.get(dQuery), FLOAT_TOLERANCE);
+        if (includeOptional)
+          assertEquals(1.0F, doc1.get(cQuery), FLOAT_TOLERANCE);
+
+      }
+    }
+  }
+
+  @Test
+  public void testPhraseQuery() throws Exception {
+    PhraseQuery q = new PhraseQuery();
+    q.add(new Term("f", "b"));
+    q.add(new Term("f", "c"));
+    CountingCollector c = new CountingCollector(TopScoreDocCollector.create(10,
+        true));
+    s.search(q, null, c);
+    final int maxDocs = s.maxDoc();
+    assertEquals(maxDocs, c.docCounts.size());
+    for (int i = 0; i < maxDocs; i++) {
+      Map<Query, Float> doc0 = c.docCounts.get(i);
+      assertEquals(1, doc0.size());
+      assertEquals(2.0F, doc0.get(q), FLOAT_TOLERANCE);
+
+      Map<Query, Float> doc1 = c.docCounts.get(++i);
+      assertEquals(1, doc1.size());
+      assertEquals(1.0F, doc1.get(q), FLOAT_TOLERANCE);
+    }
+
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/TestTermRangeFilter.java b/lucene/backwards/src/test/org/apache/lucene/search/TestTermRangeFilter.java
new file mode 100644
index 0000000..03b2b06
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/TestTermRangeFilter.java
@@ -0,0 +1,488 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.text.Collator;
+import java.util.Locale;
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.store.Directory;
+import org.junit.Test;
+
+/**
+ * A basic 'positive' Unit test class for the TermRangeFilter class.
+ * 
+ * <p>
+ * NOTE: at the moment, this class only tests for 'positive' results; it does
+ * not verify the results to ensure there are no 'false positives', nor does it
+ * adequately test 'negative' results. It also does not test that garbage input
+ * results in an Exception.
+ */
+public class TestTermRangeFilter extends BaseTestRangeFilter {
+  
+  @Test
+  public void testRangeFilterId() throws IOException {
+    
+    IndexReader reader = signedIndexReader;
+    IndexSearcher search = newSearcher(reader);
+    
+    int medId = ((maxId - minId) / 2);
+    
+    String minIP = pad(minId);
+    String maxIP = pad(maxId);
+    String medIP = pad(medId);
+    
+    int numDocs = reader.numDocs();
+    
+    assertEquals("num of docs", numDocs, 1 + maxId - minId);
+    
+    ScoreDoc[] result;
+    Query q = new TermQuery(new Term("body", "body"));
+    
+    // test id, bounded on both ends
+    
+    result = search.search(q, new TermRangeFilter("id", minIP, maxIP, T, T),
+        numDocs).scoreDocs;
+    assertEquals("find all", numDocs, result.length);
+    
+    result = search.search(q, new TermRangeFilter("id", minIP, maxIP, T, F),
+        numDocs).scoreDocs;
+    assertEquals("all but last", numDocs - 1, result.length);
+    
+    result = search.search(q, new TermRangeFilter("id", minIP, maxIP, F, T),
+        numDocs).scoreDocs;
+    assertEquals("all but first", numDocs - 1, result.length);
+    
+    result = search.search(q, new TermRangeFilter("id", minIP, maxIP, F, F),
+        numDocs).scoreDocs;
+    assertEquals("all but ends", numDocs - 2, result.length);
+    
+    result = search.search(q, new TermRangeFilter("id", medIP, maxIP, T, T),
+        numDocs).scoreDocs;
+    assertEquals("med and up", 1 + maxId - medId, result.length);
+    
+    result = search.search(q, new TermRangeFilter("id", minIP, medIP, T, T),
+        numDocs).scoreDocs;
+    assertEquals("up to med", 1 + medId - minId, result.length);
+    
+    // unbounded id
+    
+    result = search.search(q, new TermRangeFilter("id", minIP, null, T, F),
+        numDocs).scoreDocs;
+    assertEquals("min and up", numDocs, result.length);
+    
+    result = search.search(q, new TermRangeFilter("id", null, maxIP, F, T),
+        numDocs).scoreDocs;
+    assertEquals("max and down", numDocs, result.length);
+    
+    result = search.search(q, new TermRangeFilter("id", minIP, null, F, F),
+        numDocs).scoreDocs;
+    assertEquals("not min, but up", numDocs - 1, result.length);
+    
+    result = search.search(q, new TermRangeFilter("id", null, maxIP, F, F),
+        numDocs).scoreDocs;
+    assertEquals("not max, but down", numDocs - 1, result.length);
+    
+    result = search.search(q, new TermRangeFilter("id", medIP, maxIP, T, F),
+        numDocs).scoreDocs;
+    assertEquals("med and up, not max", maxId - medId, result.length);
+    
+    result = search.search(q, new TermRangeFilter("id", minIP, medIP, F, T),
+        numDocs).scoreDocs;
+    assertEquals("not min, up to med", medId - minId, result.length);
+    
+    // very small sets
+    
+    result = search.search(q, new TermRangeFilter("id", minIP, minIP, F, F),
+        numDocs).scoreDocs;
+    assertEquals("min,min,F,F", 0, result.length);
+    result = search.search(q, new TermRangeFilter("id", medIP, medIP, F, F),
+        numDocs).scoreDocs;
+    assertEquals("med,med,F,F", 0, result.length);
+    result = search.search(q, new TermRangeFilter("id", maxIP, maxIP, F, F),
+        numDocs).scoreDocs;
+    assertEquals("max,max,F,F", 0, result.length);
+    
+    result = search.search(q, new TermRangeFilter("id", minIP, minIP, T, T),
+        numDocs).scoreDocs;
+    assertEquals("min,min,T,T", 1, result.length);
+    result = search.search(q, new TermRangeFilter("id", null, minIP, F, T),
+        numDocs).scoreDocs;
+    assertEquals("nul,min,F,T", 1, result.length);
+    
+    result = search.search(q, new TermRangeFilter("id", maxIP, maxIP, T, T),
+        numDocs).scoreDocs;
+    assertEquals("max,max,T,T", 1, result.length);
+    result = search.search(q, new TermRangeFilter("id", maxIP, null, T, F),
+        numDocs).scoreDocs;
+    assertEquals("max,nul,T,T", 1, result.length);
+    
+    result = search.search(q, new TermRangeFilter("id", medIP, medIP, T, T),
+        numDocs).scoreDocs;
+    assertEquals("med,med,T,T", 1, result.length);
+    
+    search.close();
+  }
+  
+  @Test
+  public void testRangeFilterIdCollating() throws IOException {
+    
+    IndexReader reader = signedIndexReader;
+    IndexSearcher search = newSearcher(reader);
+    
+    Collator c = Collator.getInstance(Locale.ENGLISH);
+    
+    int medId = ((maxId - minId) / 2);
+    
+    String minIP = pad(minId);
+    String maxIP = pad(maxId);
+    String medIP = pad(medId);
+    
+    int numDocs = reader.numDocs();
+    
+    assertEquals("num of docs", numDocs, 1 + maxId - minId);
+    
+    Query q = new TermQuery(new Term("body", "body"));
+    
+    // test id, bounded on both ends
+    int numHits = search.search(q, new TermRangeFilter("id", minIP, maxIP, T,
+        T, c), 1000).totalHits;
+    assertEquals("find all", numDocs, numHits);
+    
+    numHits = search.search(q,
+        new TermRangeFilter("id", minIP, maxIP, T, F, c), 1000).totalHits;
+    assertEquals("all but last", numDocs - 1, numHits);
+    
+    numHits = search.search(q,
+        new TermRangeFilter("id", minIP, maxIP, F, T, c), 1000).totalHits;
+    assertEquals("all but first", numDocs - 1, numHits);
+    
+    numHits = search.search(q,
+        new TermRangeFilter("id", minIP, maxIP, F, F, c), 1000).totalHits;
+    assertEquals("all but ends", numDocs - 2, numHits);
+    
+    numHits = search.search(q,
+        new TermRangeFilter("id", medIP, maxIP, T, T, c), 1000).totalHits;
+    assertEquals("med and up", 1 + maxId - medId, numHits);
+    
+    numHits = search.search(q,
+        new TermRangeFilter("id", minIP, medIP, T, T, c), 1000).totalHits;
+    assertEquals("up to med", 1 + medId - minId, numHits);
+    
+    // unbounded id
+    
+    numHits = search.search(q, new TermRangeFilter("id", minIP, null, T, F, c),
+        1000).totalHits;
+    assertEquals("min and up", numDocs, numHits);
+    
+    numHits = search.search(q, new TermRangeFilter("id", null, maxIP, F, T, c),
+        1000).totalHits;
+    assertEquals("max and down", numDocs, numHits);
+    
+    numHits = search.search(q, new TermRangeFilter("id", minIP, null, F, F, c),
+        1000).totalHits;
+    assertEquals("not min, but up", numDocs - 1, numHits);
+    
+    numHits = search.search(q, new TermRangeFilter("id", null, maxIP, F, F, c),
+        1000).totalHits;
+    assertEquals("not max, but down", numDocs - 1, numHits);
+    
+    numHits = search.search(q,
+        new TermRangeFilter("id", medIP, maxIP, T, F, c), 1000).totalHits;
+    assertEquals("med and up, not max", maxId - medId, numHits);
+    
+    numHits = search.search(q,
+        new TermRangeFilter("id", minIP, medIP, F, T, c), 1000).totalHits;
+    assertEquals("not min, up to med", medId - minId, numHits);
+    
+    // very small sets
+    
+    numHits = search.search(q,
+        new TermRangeFilter("id", minIP, minIP, F, F, c), 1000).totalHits;
+    assertEquals("min,min,F,F", 0, numHits);
+    numHits = search.search(q,
+        new TermRangeFilter("id", medIP, medIP, F, F, c), 1000).totalHits;
+    assertEquals("med,med,F,F", 0, numHits);
+    numHits = search.search(q,
+        new TermRangeFilter("id", maxIP, maxIP, F, F, c), 1000).totalHits;
+    assertEquals("max,max,F,F", 0, numHits);
+    
+    numHits = search.search(q,
+        new TermRangeFilter("id", minIP, minIP, T, T, c), 1000).totalHits;
+    assertEquals("min,min,T,T", 1, numHits);
+    numHits = search.search(q, new TermRangeFilter("id", null, minIP, F, T, c),
+        1000).totalHits;
+    assertEquals("nul,min,F,T", 1, numHits);
+    
+    numHits = search.search(q,
+        new TermRangeFilter("id", maxIP, maxIP, T, T, c), 1000).totalHits;
+    assertEquals("max,max,T,T", 1, numHits);
+    numHits = search.search(q, new TermRangeFilter("id", maxIP, null, T, F, c),
+        1000).totalHits;
+    assertEquals("max,nul,T,T", 1, numHits);
+    
+    numHits = search.search(q,
+        new TermRangeFilter("id", medIP, medIP, T, T, c), 1000).totalHits;
+    assertEquals("med,med,T,T", 1, numHits);
+    
+    search.close();
+  }
+  
+  @Test
+  public void testRangeFilterRand() throws IOException {
+    
+    IndexReader reader = signedIndexReader;
+    IndexSearcher search = newSearcher(reader);
+    
+    String minRP = pad(signedIndexDir.minR);
+    String maxRP = pad(signedIndexDir.maxR);
+    
+    int numDocs = reader.numDocs();
+    
+    assertEquals("num of docs", numDocs, 1 + maxId - minId);
+    
+    ScoreDoc[] result;
+    Query q = new TermQuery(new Term("body", "body"));
+    
+    // test extremes, bounded on both ends
+    
+    result = search.search(q, new TermRangeFilter("rand", minRP, maxRP, T, T),
+        numDocs).scoreDocs;
+    assertEquals("find all", numDocs, result.length);
+    
+    result = search.search(q, new TermRangeFilter("rand", minRP, maxRP, T, F),
+        numDocs).scoreDocs;
+    assertEquals("all but biggest", numDocs - 1, result.length);
+    
+    result = search.search(q, new TermRangeFilter("rand", minRP, maxRP, F, T),
+        numDocs).scoreDocs;
+    assertEquals("all but smallest", numDocs - 1, result.length);
+    
+    result = search.search(q, new TermRangeFilter("rand", minRP, maxRP, F, F),
+        numDocs).scoreDocs;
+    assertEquals("all but extremes", numDocs - 2, result.length);
+    
+    // unbounded
+    
+    result = search.search(q, new TermRangeFilter("rand", minRP, null, T, F),
+        numDocs).scoreDocs;
+    assertEquals("smallest and up", numDocs, result.length);
+    
+    result = search.search(q, new TermRangeFilter("rand", null, maxRP, F, T),
+        numDocs).scoreDocs;
+    assertEquals("biggest and down", numDocs, result.length);
+    
+    result = search.search(q, new TermRangeFilter("rand", minRP, null, F, F),
+        numDocs).scoreDocs;
+    assertEquals("not smallest, but up", numDocs - 1, result.length);
+    
+    result = search.search(q, new TermRangeFilter("rand", null, maxRP, F, F),
+        numDocs).scoreDocs;
+    assertEquals("not biggest, but down", numDocs - 1, result.length);
+    
+    // very small sets
+    
+    result = search.search(q, new TermRangeFilter("rand", minRP, minRP, F, F),
+        numDocs).scoreDocs;
+    assertEquals("min,min,F,F", 0, result.length);
+    result = search.search(q, new TermRangeFilter("rand", maxRP, maxRP, F, F),
+        numDocs).scoreDocs;
+    assertEquals("max,max,F,F", 0, result.length);
+    
+    result = search.search(q, new TermRangeFilter("rand", minRP, minRP, T, T),
+        numDocs).scoreDocs;
+    assertEquals("min,min,T,T", 1, result.length);
+    result = search.search(q, new TermRangeFilter("rand", null, minRP, F, T),
+        numDocs).scoreDocs;
+    assertEquals("nul,min,F,T", 1, result.length);
+    
+    result = search.search(q, new TermRangeFilter("rand", maxRP, maxRP, T, T),
+        numDocs).scoreDocs;
+    assertEquals("max,max,T,T", 1, result.length);
+    result = search.search(q, new TermRangeFilter("rand", maxRP, null, T, F),
+        numDocs).scoreDocs;
+    assertEquals("max,nul,T,F", 1, result.length);
+    
+    search.close();
+  }
+  
+  @Test
+  public void testRangeFilterRandCollating() throws IOException {
+    
+    // using the unsigned index because collation seems to ignore hyphens
+    IndexReader reader = unsignedIndexReader;
+    IndexSearcher search = newSearcher(reader);
+    
+    Collator c = Collator.getInstance(Locale.ENGLISH);
+    
+    String minRP = pad(unsignedIndexDir.minR);
+    String maxRP = pad(unsignedIndexDir.maxR);
+    
+    int numDocs = reader.numDocs();
+    
+    assertEquals("num of docs", numDocs, 1 + maxId - minId);
+    
+    Query q = new TermQuery(new Term("body", "body"));
+    
+    // test extremes, bounded on both ends
+    
+    int numHits = search.search(q, new TermRangeFilter("rand", minRP, maxRP, T,
+        T, c), 1000).totalHits;
+    assertEquals("find all", numDocs, numHits);
+    
+    numHits = search.search(q, new TermRangeFilter("rand", minRP, maxRP, T, F,
+        c), 1000).totalHits;
+    assertEquals("all but biggest", numDocs - 1, numHits);
+    
+    numHits = search.search(q, new TermRangeFilter("rand", minRP, maxRP, F, T,
+        c), 1000).totalHits;
+    assertEquals("all but smallest", numDocs - 1, numHits);
+    
+    numHits = search.search(q, new TermRangeFilter("rand", minRP, maxRP, F, F,
+        c), 1000).totalHits;
+    assertEquals("all but extremes", numDocs - 2, numHits);
+    
+    // unbounded
+    
+    numHits = search.search(q,
+        new TermRangeFilter("rand", minRP, null, T, F, c), 1000).totalHits;
+    assertEquals("smallest and up", numDocs, numHits);
+    
+    numHits = search.search(q,
+        new TermRangeFilter("rand", null, maxRP, F, T, c), 1000).totalHits;
+    assertEquals("biggest and down", numDocs, numHits);
+    
+    numHits = search.search(q,
+        new TermRangeFilter("rand", minRP, null, F, F, c), 1000).totalHits;
+    assertEquals("not smallest, but up", numDocs - 1, numHits);
+    
+    numHits = search.search(q,
+        new TermRangeFilter("rand", null, maxRP, F, F, c), 1000).totalHits;
+    assertEquals("not biggest, but down", numDocs - 1, numHits);
+    
+    // very small sets
+    
+    numHits = search.search(q, new TermRangeFilter("rand", minRP, minRP, F, F,
+        c), 1000).totalHits;
+    assertEquals("min,min,F,F", 0, numHits);
+    numHits = search.search(q, new TermRangeFilter("rand", maxRP, maxRP, F, F,
+        c), 1000).totalHits;
+    assertEquals("max,max,F,F", 0, numHits);
+    
+    numHits = search.search(q, new TermRangeFilter("rand", minRP, minRP, T, T,
+        c), 1000).totalHits;
+    assertEquals("min,min,T,T", 1, numHits);
+    numHits = search.search(q,
+        new TermRangeFilter("rand", null, minRP, F, T, c), 1000).totalHits;
+    assertEquals("nul,min,F,T", 1, numHits);
+    
+    numHits = search.search(q, new TermRangeFilter("rand", maxRP, maxRP, T, T,
+        c), 1000).totalHits;
+    assertEquals("max,max,T,T", 1, numHits);
+    numHits = search.search(q,
+        new TermRangeFilter("rand", maxRP, null, T, F, c), 1000).totalHits;
+    assertEquals("max,nul,T,F", 1, numHits);
+    
+    search.close();
+  }
+  
+  @Test
+  public void testFarsi() throws Exception {
+    
+    /* build an index */
+    Directory farsiIndex = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random, farsiIndex);
+    Document doc = new Document();
+    doc.add(newField("content", "\u0633\u0627\u0628", Field.Store.YES,
+        Field.Index.NOT_ANALYZED));
+    doc.add(newField("body", "body", Field.Store.YES,
+        Field.Index.NOT_ANALYZED));
+    writer.addDocument(doc);
+    
+    IndexReader reader = writer.getReader();
+    writer.close();
+    
+    IndexSearcher search = newSearcher(reader);
+    Query q = new TermQuery(new Term("body", "body"));
+    
+    // Neither Java 1.4.2 nor 1.5.0 has Farsi Locale collation available in
+    // RuleBasedCollator. However, the Arabic Locale seems to order the Farsi
+    // characters properly.
+    Collator collator = Collator.getInstance(new Locale("ar"));
+    
+    // Unicode order would include U+0633 in [ U+062F - U+0698 ], but Farsi
+    // orders the U+0698 character before the U+0633 character, so the single
+    // index Term below should NOT be returned by a TermRangeFilter with a Farsi
+    // Collator (or an Arabic one for the case when Farsi is not supported).
+    int numHits = search.search(q, new TermRangeFilter("content", "\u062F",
+        "\u0698", T, T, collator), 1000).totalHits;
+    assertEquals("The index Term should not be included.", 0, numHits);
+    
+    numHits = search.search(q, new TermRangeFilter("content", "\u0633",
+        "\u0638", T, T, collator), 1000).totalHits;
+    assertEquals("The index Term should be included.", 1, numHits);
+    search.close();
+    reader.close();
+    farsiIndex.close();
+  }
+  
+  @Test
+  public void testDanish() throws Exception {
+    
+    /* build an index */
+    Directory danishIndex = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random, danishIndex);
+    // Danish collation orders the words below in the given order
+    // (example taken from TestSort.testInternationalSort() ).
+    String[] words = {"H\u00D8T", "H\u00C5T", "MAND"};
+    for (int docnum = 0; docnum < words.length; ++docnum) {
+      Document doc = new Document();
+      doc.add(newField("content", words[docnum], Field.Store.YES,
+          Field.Index.NOT_ANALYZED));
+      doc.add(newField("body", "body", Field.Store.YES,
+          Field.Index.NOT_ANALYZED));
+      writer.addDocument(doc);
+    }
+    IndexReader reader = writer.getReader();
+    writer.close();
+    
+    IndexSearcher search = newSearcher(reader);
+    Query q = new TermQuery(new Term("body", "body"));
+    
+    Collator collator = Collator.getInstance(new Locale("da", "dk"));
+    
+    // Unicode order would not include "H\u00C5T" in [ "H\u00D8T", "MAND" ],
+    // but Danish collation does.
+    int numHits = search.search(q, new TermRangeFilter("content", "H\u00D8T",
+        "MAND", F, F, collator), 1000).totalHits;
+    assertEquals("The index Term should be included.", 1, numHits);
+    
+    numHits = search.search(q, new TermRangeFilter("content", "H\u00C5T",
+        "MAND", F, F, collator), 1000).totalHits;
+    assertEquals("The index Term should not be included.", 0, numHits);
+    search.close();
+    reader.close();
+    danishIndex.close();
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/TestTermRangeQuery.java b/lucene/backwards/src/test/org/apache/lucene/search/TestTermRangeQuery.java
new file mode 100644
index 0000000..bb11751
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/TestTermRangeQuery.java
@@ -0,0 +1,410 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.MockTokenizer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+
+import org.apache.lucene.util.LuceneTestCase;
+import java.io.IOException;
+import java.io.Reader;
+import java.util.Locale;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.Arrays;
+import java.text.Collator;
+
+
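+/** Tests {@link TermRangeQuery}: inclusive/exclusive bounds, collators, equals/hashCode, and open-ended (null) bounds. */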
+public class TestTermRangeQuery extends LuceneTestCase {
+
+  private int docCount = 0;
+  private Directory dir;
+  
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    dir = newDirectory();
+  }
+  
+  @Override
+  public void tearDown() throws Exception {
+    dir.close();
+    super.tearDown();
+  }
+
+  public void testExclusive() throws Exception {
+    Query query = new TermRangeQuery("content", "A", "C", false, false);
+    initializeIndex(new String[] {"A", "B", "C", "D"});
+    IndexSearcher searcher = new IndexSearcher(dir, true);
+    ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals("A,B,C,D, only B in range", 1, hits.length);
+    searcher.close();
+
+    initializeIndex(new String[] {"A", "B", "D"});
+    searcher = new IndexSearcher(dir, true);
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals("A,B,D, only B in range", 1, hits.length);
+    searcher.close();
+
+    addDoc("C");
+    searcher = new IndexSearcher(dir, true);
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals("C added, still only B in range", 1, hits.length);
+    searcher.close();
+  }
+  
+  public void testInclusive() throws Exception {
+    Query query = new TermRangeQuery("content", "A", "C", true, true);
+
+    initializeIndex(new String[]{"A", "B", "C", "D"});
+    IndexSearcher searcher = new IndexSearcher(dir, true);
+    ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals("A,B,C,D - A,B,C in range", 3, hits.length);
+    searcher.close();
+
+    initializeIndex(new String[]{"A", "B", "D"});
+    searcher = new IndexSearcher(dir, true);
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals("A,B,D - A and B in range", 2, hits.length);
+    searcher.close();
+
+    addDoc("C");
+    searcher = new IndexSearcher(dir, true);
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals("C added - A, B, C in range", 3, hits.length);
+    searcher.close();
+  }
+
+  /** This test does not really belong here, but it exercises the fuzzy-query rewrite mode
+   * (TOP_TERMS_SCORING_BOOLEAN_REWRITE) with constant score and checks that only the lower
+   * end of the term range is put into the rewritten query. */
+  public void testTopTermsRewrite() throws Exception {
+    initializeIndex(new String[]{"A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K"});
+
+    IndexSearcher searcher = new IndexSearcher(dir, true);
+    TermRangeQuery query = new TermRangeQuery("content", "B", "J", true, true);
+    checkBooleanTerms(searcher, query, "B", "C", "D", "E", "F", "G", "H", "I", "J");
+    
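+    // Lowering the clause limit forces the top-terms rewrite to keep only the lower end of the range (B, C, D).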
+    final int savedClauseCount = BooleanQuery.getMaxClauseCount();
+    try {
+      BooleanQuery.setMaxClauseCount(3);
+      checkBooleanTerms(searcher, query, "B", "C", "D");
+    } finally {
+      BooleanQuery.setMaxClauseCount(savedClauseCount);
+    }
+    searcher.close();
+  }
+  
+  private void checkBooleanTerms(Searcher searcher, TermRangeQuery query, String... terms) throws IOException {
+    query.setRewriteMethod(new MultiTermQuery.TopTermsScoringBooleanQueryRewrite(50));
+    final BooleanQuery bq = (BooleanQuery) searcher.rewrite(query);
+    final Set<String> allowedTerms = new HashSet<String>(Arrays.asList(terms));
+    assertEquals(allowedTerms.size(), bq.clauses().size());
+    for (BooleanClause c : bq.clauses()) {
+      assertTrue(c.getQuery() instanceof TermQuery);
+      final TermQuery tq = (TermQuery) c.getQuery();
+      final String term = tq.getTerm().text();
+      assertTrue("invalid term: "+ term, allowedTerms.contains(term));
+      allowedTerms.remove(term); // remove to fail on double terms
+    }
+    assertEquals(0, allowedTerms.size());
+  }
+
+  public void testEqualsHashcode() {
+    Query query = new TermRangeQuery("content", "A", "C", true, true);
+    
+    query.setBoost(1.0f);
+    Query other = new TermRangeQuery("content", "A", "C", true, true);
+    other.setBoost(1.0f);
+
+    assertEquals("query equals itself is true", query, query);
+    assertEquals("equivalent queries are equal", query, other);
+    assertEquals("hashcode must return same value when equals is true", query.hashCode(), other.hashCode());
+
+    other.setBoost(2.0f);
+    assertFalse("Different boost queries are not equal", query.equals(other));
+
+    other = new TermRangeQuery("notcontent", "A", "C", true, true);
+    assertFalse("Different fields are not equal", query.equals(other));
+
+    other = new TermRangeQuery("content", "X", "C", true, true);
+    assertFalse("Different lower terms are not equal", query.equals(other));
+
+    other = new TermRangeQuery("content", "A", "Z", true, true);
+    assertFalse("Different upper terms are not equal", query.equals(other));
+
+    query = new TermRangeQuery("content", null, "C", true, true);
+    other = new TermRangeQuery("content", null, "C", true, true);
+    assertEquals("equivalent queries with null lowerterms are equal()", query, other);
+    assertEquals("hashcode must return same value when equals is true", query.hashCode(), other.hashCode());
+
+    query = new TermRangeQuery("content", "C", null, true, true);
+    other = new TermRangeQuery("content", "C", null, true, true);
+    assertEquals("equivalent queries with null upperterms are equal()", query, other);
+    assertEquals("hashcode returns same value", query.hashCode(), other.hashCode());
+
+    query = new TermRangeQuery("content", null, "C", true, true);
+    other = new TermRangeQuery("content", "C", null, true, true);
+    assertFalse("queries with different upper and lower terms are not equal", query.equals(other));
+
+    query = new TermRangeQuery("content", "A", "C", false, false);
+    other = new TermRangeQuery("content", "A", "C", true, true);
+    assertFalse("queries with different inclusive are not equal", query.equals(other));
+    
+    query = new TermRangeQuery("content", "A", "C", false, false);
+    other = new TermRangeQuery("content", "A", "C", false, false, Collator.getInstance());
+    assertFalse("a query with a collator is not equal to one without", query.equals(other));
+  }
+
+  public void testExclusiveCollating() throws Exception {
+    Query query = new TermRangeQuery("content", "A", "C", false, false, Collator.getInstance(Locale.ENGLISH));
+    initializeIndex(new String[] {"A", "B", "C", "D"});
+    IndexSearcher searcher = new IndexSearcher(dir, true);
+    ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals("A,B,C,D, only B in range", 1, hits.length);
+    searcher.close();
+
+    initializeIndex(new String[] {"A", "B", "D"});
+    searcher = new IndexSearcher(dir, true);
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals("A,B,D, only B in range", 1, hits.length);
+    searcher.close();
+
+    addDoc("C");
+    searcher = new IndexSearcher(dir, true);
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals("C added, still only B in range", 1, hits.length);
+    searcher.close();
+  }
+
+  public void testInclusiveCollating() throws Exception {
+    Query query = new TermRangeQuery("content", "A", "C",true, true, Collator.getInstance(Locale.ENGLISH));
+
+    initializeIndex(new String[]{"A", "B", "C", "D"});
+    IndexSearcher searcher = new IndexSearcher(dir, true);
+    ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals("A,B,C,D - A,B,C in range", 3, hits.length);
+    searcher.close();
+
+    initializeIndex(new String[]{"A", "B", "D"});
+    searcher = new IndexSearcher(dir, true);
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals("A,B,D - A and B in range", 2, hits.length);
+    searcher.close();
+
+    addDoc("C");
+    searcher = new IndexSearcher(dir, true);
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals("C added - A, B, C in range", 3, hits.length);
+    searcher.close();
+  }
+
+  public void testFarsi() throws Exception {
+    // Neither Java 1.4.2 nor 1.5.0 has Farsi Locale collation available in
+    // RuleBasedCollator.  However, the Arabic Locale seems to order the Farsi
+    // characters properly.
+    Collator collator = Collator.getInstance(new Locale("ar"));
+    Query query = new TermRangeQuery("content", "\u062F", "\u0698", true, true, collator);
+    // Unicode order would include U+0633 in [ U+062F - U+0698 ], but Farsi
+    // orders the U+0698 character before the U+0633 character, so the single
+    // index Term below should NOT be returned by a TermRangeQuery with a Farsi
+    // Collator (or an Arabic one for the case when Farsi is not supported).
+    initializeIndex(new String[]{ "\u0633\u0627\u0628"});
+    IndexSearcher searcher = new IndexSearcher(dir, true);
+    ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals("The index Term should not be included.", 0, hits.length);
+
+    query = new TermRangeQuery("content", "\u0633", "\u0638",true, true, collator);
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals("The index Term should be included.", 1, hits.length);
+    searcher.close();
+  }
+  
+  public void testDanish() throws Exception {
+    Collator collator = Collator.getInstance(new Locale("da", "dk"));
+    // Danish collation orders the words below in the given order (example taken
+    // from TestSort.testInternationalSort() ).
+    String[] words = { "H\u00D8T", "H\u00C5T", "MAND" };
+    Query query = new TermRangeQuery("content", "H\u00D8T", "MAND", false, false, collator);
+
+    // Unicode order would not include "H\u00C5T" in [ "H\u00D8T", "MAND" ],
+    // but Danish collation does.
+    initializeIndex(words);
+    IndexSearcher searcher = new IndexSearcher(dir, true);
+    ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals("The index Term should be included.", 1, hits.length);
+
+    query = new TermRangeQuery("content", "H\u00C5T", "MAND", false, false, collator);
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals("The index Term should not be included.", 0, hits.length);
+    searcher.close();
+  }
+
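+  // Analyzer that emits a single token holding at most the first character of the input.
+  // An empty input still yields one (empty) token, which the LUCENE-38 tests above rely on
+  // to get an empty term into the index.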
+  private static class SingleCharAnalyzer extends Analyzer {
+
+    private static class SingleCharTokenizer extends Tokenizer {
+      char[] buffer = new char[1];
+      boolean done = false;
+      CharTermAttribute termAtt;
+      
+      public SingleCharTokenizer(Reader r) {
+        super(r);
+        termAtt = addAttribute(CharTermAttribute.class);
+      }
+
+      @Override
+      public boolean incrementToken() throws IOException {
+        if (done)
+          return false;
+        else {
+          int count = input.read(buffer);
+          clearAttributes();
+          done = true;
+          if (count == 1) {
+            termAtt.copyBuffer(buffer, 0, 1);
+          }
+          return true;
+        }
+      }
+
+      @Override
+      public final void reset(Reader reader) throws IOException {
+        super.reset(reader);
+        done = false;
+      }
+    }
+
+    @Override
+    public TokenStream reusableTokenStream(String fieldName, Reader reader) throws IOException {
+      Tokenizer tokenizer = (Tokenizer) getPreviousTokenStream();
+      if (tokenizer == null) {
+        tokenizer = new SingleCharTokenizer(reader);
+        setPreviousTokenStream(tokenizer);
+      } else
+        tokenizer.reset(reader);
+      return tokenizer;
+    }
+
+    @Override
+    public TokenStream tokenStream(String fieldName, Reader reader) {
+      return new SingleCharTokenizer(reader);
+    }
+  }
+
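+  // initializeIndex recreates the index from scratch (OpenMode.CREATE);
+  // addDoc appends to whatever is already indexed (OpenMode.APPEND).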
+  private void initializeIndex(String[] values) throws IOException {
+    initializeIndex(values, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false));
+  }
+
+  private void initializeIndex(String[] values, Analyzer analyzer) throws IOException {
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
+        TEST_VERSION_CURRENT, analyzer).setOpenMode(OpenMode.CREATE));
+    for (int i = 0; i < values.length; i++) {
+      insertDoc(writer, values[i]);
+    }
+    writer.close();
+  }
+
+  // shouldn't create a new analyzer for every doc?
+  private void addDoc(String content) throws IOException {
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).setOpenMode(OpenMode.APPEND));
+    insertDoc(writer, content);
+    writer.close();
+  }
+
+  private void insertDoc(IndexWriter writer, String content) throws IOException {
+    Document doc = new Document();
+
+    doc.add(newField("id", "id" + docCount, Field.Store.YES, Field.Index.NOT_ANALYZED));
+    doc.add(newField("content", content, Field.Store.NO, Field.Index.ANALYZED));
+
+    writer.addDocument(doc);
+    docCount++;
+  }
+
+  // LUCENE-38
+  public void testExclusiveLowerNull() throws Exception {
+    Analyzer analyzer = new SingleCharAnalyzer();
+    //http://issues.apache.org/jira/browse/LUCENE-38
+    Query query = new TermRangeQuery("content", null, "C",
+                                 false, false);
+    initializeIndex(new String[] {"A", "B", "", "C", "D"}, analyzer);
+    IndexSearcher searcher = new IndexSearcher(dir, true);
+    int numHits = searcher.search(query, null, 1000).totalHits;
+    // When Lucene-38 is fixed, use the assert on the next line:
+    assertEquals("A,B,<empty string>,C,D => A, B & <empty string> are in range", 3, numHits);
+    // until Lucene-38 is fixed, use this assert:
+    //assertEquals("A,B,<empty string>,C,D => A, B & <empty string> are in range", 2, hits.length());
+
+    searcher.close();
+    initializeIndex(new String[] {"A", "B", "", "D"}, analyzer);
+    searcher = new IndexSearcher(dir, true);
+    numHits = searcher.search(query, null, 1000).totalHits;
+    // When Lucene-38 is fixed, use the assert on the next line:
+    assertEquals("A,B,<empty string>,D => A, B & <empty string> are in range", 3, numHits);
+    // until Lucene-38 is fixed, use this assert:
+    //assertEquals("A,B,<empty string>,D => A, B & <empty string> are in range", 2, hits.length());
+    searcher.close();
+    addDoc("C");
+    searcher = new IndexSearcher(dir, true);
+    numHits = searcher.search(query, null, 1000).totalHits;
+    // When Lucene-38 is fixed, use the assert on the next line:
+    assertEquals("C added, still A, B & <empty string> are in range", 3, numHits);
+    // until Lucene-38 is fixed, use this assert
+    //assertEquals("C added, still A, B & <empty string> are in range", 2, hits.length());
+    searcher.close();
+  }
+
+  // LUCENE-38
+  public void testInclusiveLowerNull() throws Exception {
+    //http://issues.apache.org/jira/browse/LUCENE-38
+    Analyzer analyzer = new SingleCharAnalyzer();
+    Query query = new TermRangeQuery("content", null, "C", true, true);
+    initializeIndex(new String[]{"A", "B", "","C", "D"}, analyzer);
+    IndexSearcher searcher = new IndexSearcher(dir, true);
+    int numHits = searcher.search(query, null, 1000).totalHits;
+    // When Lucene-38 is fixed, use the assert on the next line:
+    assertEquals("A,B,<empty string>,C,D => A,B,<empty string>,C in range", 4, numHits);
+    // until Lucene-38 is fixed, use this assert
+    //assertEquals("A,B,<empty string>,C,D => A,B,<empty string>,C in range", 3, hits.length());
+    searcher.close();
+    initializeIndex(new String[]{"A", "B", "", "D"}, analyzer);
+    searcher = new IndexSearcher(dir, true);
+    numHits = searcher.search(query, null, 1000).totalHits;
+    // When Lucene-38 is fixed, use the assert on the next line:
+    assertEquals("A,B,<empty string>,D - A, B and <empty string> in range", 3, numHits);
+    // until Lucene-38 is fixed, use this assert
+    //assertEquals("A,B,<empty string>,D => A, B and <empty string> in range", 2, hits.length());
+    searcher.close();
+    addDoc("C");
+    searcher = new IndexSearcher(dir, true);
+    numHits = searcher.search(query, null, 1000).totalHits;
+    // When Lucene-38 is fixed, use the assert on the next line:
+    assertEquals("C added => A,B,<empty string>,C in range", 4, numHits);
+    // until Lucene-38 is fixed, use this assert
+    //assertEquals("C added => A,B,<empty string>,C in range", 3, hits.length());
+    searcher.close();
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/TestTermScorer.java b/lucene/backwards/src/test/org/apache/lucene/search/TestTermScorer.java
new file mode 100644
index 0000000..778d9ed
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/TestTermScorer.java
@@ -0,0 +1,182 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+
+public class TestTermScorer extends LuceneTestCase {
+  protected Directory directory;
+  private static final String FIELD = "field";
+  
+  protected String[] values = new String[] {"all", "dogs dogs", "like",
+      "playing", "fetch", "all"};
+  protected IndexSearcher indexSearcher;
+  protected IndexReader indexReader;
+  
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    directory = newDirectory();
+    
+    RandomIndexWriter writer = new RandomIndexWriter(random, directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
+    for (int i = 0; i < values.length; i++) {
+      Document doc = new Document();
+      doc.add(newField(FIELD, values[i], Field.Store.YES,
+          Field.Index.ANALYZED));
+      writer.addDocument(doc);
+    }
+    writer.optimize();
+    indexReader = writer.getReader();
+    writer.close();
+    indexSearcher = newSearcher(indexReader);
+  }
+  
+  @Override
+  public void tearDown() throws Exception {
+    indexSearcher.close();
+    indexReader.close();
+    directory.close();
+    super.tearDown();
+  }
+
+  public void test() throws IOException {
+    
+    Term allTerm = new Term(FIELD, "all");
+    TermQuery termQuery = new TermQuery(allTerm);
+    
+    Weight weight = indexSearcher.createNormalizedWeight(termQuery);
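+    // Scorers operate on a single segment, so fetch the first sub-reader when the reader is composite.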
+    IndexReader sub = indexSearcher.getIndexReader().getSequentialSubReaders() == null ?
+                indexSearcher.getIndexReader() : indexSearcher.getIndexReader().getSequentialSubReaders()[0];
+    Scorer ts = weight.scorer(sub, true, true);
+    // we have 2 documents with the term all in them, one document for all the
+    // other values
+    final List<TestHit> docs = new ArrayList<TestHit>();
+    // Scorer.score(Collector) drives the iteration itself, so no explicit nextDoc() call is needed.
+    
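+    // Only docs 0 and 5 contain the term "all", and both should receive the same score.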
+    ts.score(new Collector() {
+      private int base = 0;
+      private Scorer scorer;
+      
+      @Override
+      public void setScorer(Scorer scorer) throws IOException {
+        this.scorer = scorer;
+      }
+      
+      @Override
+      public void collect(int doc) throws IOException {
+        float score = scorer.score();
+        doc = doc + base;
+        docs.add(new TestHit(doc, score));
+        assertTrue("score " + score + " is not greater than 0", score > 0);
+        assertTrue("doc should be 0 or 5, got: " + doc,
+            doc == 0 || doc == 5);
+      }
+      
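+      // setNextReader is called once per segment; docBase maps segment-local doc ids to top-level ids.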
+      @Override
+      public void setNextReader(IndexReader reader, int docBase) {
+        base = docBase;
+      }
+      
+      @Override
+      public boolean acceptsDocsOutOfOrder() {
+        return true;
+      }
+    });
+    assertTrue("docs Size: " + docs.size() + " is not: " + 2, docs.size() == 2);
+    TestHit doc0 = docs.get(0);
+    TestHit doc5 = docs.get(1);
+    // The scores should be the same
+    assertTrue(doc0.score + " does not equal: " + doc5.score,
+        doc0.score == doc5.score);
+    /*
+     * Expected score (DefaultSimilarity; all floats are approximate):
+     *   tf = 1, numDocs = 6, docFreq(all) = 2
+     *   idf = ln(6/3) + 1 = 1.693147, idf^2 = 2.8667
+     *   boost = 1, lengthNorm = 1 (there is 1 term in every document), coord = 1
+     *   sumOfSquaredWeights = (idf * boost)^2 = 1.693147^2 = 2.8667
+     *   queryNorm = 1 / (sumOfSquaredWeights)^0.5 = 1 / 1.693147 = 0.590
+     *   score = 1 * 2.8667 * 1 * 1 * 0.590 = 1.69
+     */
+    assertTrue(doc0.score + " does not equal: " + 1.6931472f,
+        doc0.score == 1.6931472f);
+  }
+  
+  public void testNext() throws Exception {
+    
+    Term allTerm = new Term(FIELD, "all");
+    TermQuery termQuery = new TermQuery(allTerm);
+    
+    Weight weight = indexSearcher.createNormalizedWeight(termQuery);
+    
+    IndexReader sub = indexSearcher.getIndexReader().getSequentialSubReaders() == null ?
+        indexSearcher.getIndexReader() : indexSearcher.getIndexReader().getSequentialSubReaders()[0];
+    Scorer ts = weight.scorer(sub, true, true);
+    assertTrue("next did not return a doc",
+        ts.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
+    assertTrue("score is not correct", ts.score() == 1.6931472f);
+    assertTrue("next did not return a doc",
+        ts.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
+    assertTrue("score is not correct", ts.score() == 1.6931472f);
+    assertTrue("next returned a doc and it should not have",
+        ts.nextDoc() == DocIdSetIterator.NO_MORE_DOCS);
+  }
+  
+  public void testAdvance() throws Exception {
+    
+    Term allTerm = new Term(FIELD, "all");
+    TermQuery termQuery = new TermQuery(allTerm);
+    
+    Weight weight = indexSearcher.createNormalizedWeight(termQuery);
+    
+    IndexReader sub = indexSearcher.getIndexReader().getSequentialSubReaders() == null ? 
+        indexSearcher.getIndexReader() : indexSearcher.getIndexReader().getSequentialSubReaders()[0];
+        
+    Scorer ts = weight.scorer(sub, true, true);
+    assertTrue("Didn't skip", ts.advance(3) != DocIdSetIterator.NO_MORE_DOCS);
+    // The next doc should be doc 5
+    assertTrue("doc should be number 5", ts.docID() == 5);
+  }
+  
+  private class TestHit {
+    public int doc;
+    public float score;
+    
+    public TestHit(int doc, float score) {
+      this.doc = doc;
+      this.score = score;
+    }
+    
+    @Override
+    public String toString() {
+      return "TestHit{" + "doc=" + doc + ", score=" + score + "}";
+    }
+  }
+  
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/TestTermVectors.java b/lucene/backwards/src/test/org/apache/lucene/search/TestTermVectors.java
new file mode 100644
index 0000000..53915c8
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/TestTermVectors.java
@@ -0,0 +1,532 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.util.IOUtils;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.MockTokenizer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.Field.Index;
+import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.document.Field.TermVector;
+import org.apache.lucene.index.*;
+import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.English;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.SortedSet;
+
+public class TestTermVectors extends LuceneTestCase {
+  private IndexSearcher searcher;
+  private IndexReader reader;
+  private Directory directory;
+
+  @Override
+  public void setUp() throws Exception {                  
+    super.setUp();
+    directory = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random, directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.SIMPLE, true)).setMergePolicy(newLogMergePolicy()));
+    //writer.setUseCompoundFile(true);
+    //writer.infoStream = System.out;
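+    // Cycle through the term-vector flavours based on the doc number, so later tests
+    // can tell from a doc id which positions/offsets data must be present.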
+    for (int i = 0; i < 1000; i++) {
+      Document doc = new Document();
+      Field.TermVector termVector;
+      int mod3 = i % 3;
+      int mod2 = i % 2;
+      if (mod2 == 0 && mod3 == 0){
+        termVector = Field.TermVector.WITH_POSITIONS_OFFSETS;
+      }
+      else if (mod2 == 0){
+        termVector = Field.TermVector.WITH_POSITIONS;
+      }
+      else if (mod3 == 0){
+        termVector = Field.TermVector.WITH_OFFSETS;
+      }
+      else {
+        termVector = Field.TermVector.YES;
+      }
+      doc.add(new Field("field", English.intToEnglish(i),
+          Field.Store.YES, Field.Index.ANALYZED, termVector));
+      writer.addDocument(doc);
+    }
+    reader = writer.getReader();
+    writer.close();
+    searcher = newSearcher(reader);
+  }
+  
+  @Override
+  public void tearDown() throws Exception {
+    searcher.close();
+    reader.close();
+    directory.close();
+    super.tearDown();
+  }
+
+  public void test() {
+    assertTrue(searcher != null);
+  }
+
+  public void testTermVectors() {
+    Query query = new TermQuery(new Term("field", "seventy"));
+    try {
+      ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
+      assertEquals(100, hits.length);
+      
+      for (int i = 0; i < hits.length; i++)
+      {
+        TermFreqVector [] vector = searcher.reader.getTermFreqVectors(hits[i].doc);
+        assertTrue(vector != null);
+        assertTrue(vector.length == 1);
+      }
+    } catch (IOException e) {
+      assertTrue(false);
+    }
+  }
+  
+  public void testTermVectorsFieldOrder() throws IOException {
+    Directory dir = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random, dir, new MockAnalyzer(random, MockTokenizer.SIMPLE, true));
+    Document doc = new Document();
+    doc.add(new Field("c", "some content here", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+    doc.add(new Field("a", "some content here", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+    doc.add(new Field("b", "some content here", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+    doc.add(new Field("x", "some content here", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+    writer.addDocument(doc);
+    IndexReader reader = writer.getReader();
+    writer.close();
+    TermFreqVector[] v = reader.getTermFreqVectors(0);
+    assertEquals(4, v.length);
+    String[] expectedFields = new String[]{"a", "b", "c", "x"};
+    int[] expectedPositions = new int[]{1, 2, 0};
+    for(int i=0;i<v.length;i++) {
+      TermPositionVector posVec = (TermPositionVector) v[i];
+      assertEquals(expectedFields[i], posVec.getField());
+      String[] terms = posVec.getTerms();
+      assertEquals(3, terms.length);
+      assertEquals("content", terms[0]);
+      assertEquals("here", terms[1]);
+      assertEquals("some", terms[2]);
+      for(int j=0;j<3;j++) {
+        int[] positions = posVec.getTermPositions(j);
+        assertEquals(1, positions.length);
+        assertEquals(expectedPositions[j], positions[0]);
+      }
+    }
+    reader.close();
+    dir.close();
+  }
+
+  public void testTermPositionVectors() throws IOException {
+    Query query = new TermQuery(new Term("field", "zero"));
+    ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals(1, hits.length);
+    
+    for (int i = 0; i < hits.length; i++)
+    {
+      TermFreqVector [] vector = searcher.reader.getTermFreqVectors(hits[i].doc);
+      assertTrue(vector != null);
+      assertTrue(vector.length == 1);
+      
+      boolean shouldBePosVector = (hits[i].doc % 2 == 0);
+      assertTrue(!shouldBePosVector || vector[0] instanceof TermPositionVector);
+      
+      boolean shouldBeOffVector = (hits[i].doc % 3 == 0);
+      assertTrue(!shouldBeOffVector || vector[0] instanceof TermPositionVector);
+      
+      if(shouldBePosVector || shouldBeOffVector){
+        TermPositionVector posVec = (TermPositionVector)vector[0];
+        String [] terms = posVec.getTerms();
+        assertTrue(terms != null && terms.length > 0);
+        
+        for (int j = 0; j < terms.length; j++) {
+          int [] positions = posVec.getTermPositions(j);
+          TermVectorOffsetInfo [] offsets = posVec.getOffsets(j);
+          
+          if(shouldBePosVector){
+            assertTrue(positions != null);
+            assertTrue(positions.length > 0);
+          }
+          else
+            assertTrue(positions == null);
+          
+          if(shouldBeOffVector){
+            assertTrue(offsets != null);
+            assertTrue(offsets.length > 0);
+          }
+          else
+            assertTrue(offsets == null);
+        }
+      }
+      else{
+        try{
+          // for plain term vectors the cast to TermPositionVector must fail
+          TermPositionVector posVec = (TermPositionVector) vector[0];
+          assertTrue(false);
+        }
+        catch(ClassCastException ignore){
+          TermFreqVector freqVec = vector[0];
+          String [] terms = freqVec.getTerms();
+          assertTrue(terms != null && terms.length > 0);
+        }
+        
+      }
+      
+    }
+  }
+  
+  public void testTermOffsetVectors() {
+    Query query = new TermQuery(new Term("field", "fifty"));
+    try {
+      ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
+      assertEquals(100, hits.length);
+      
+      for (int i = 0; i < hits.length; i++)
+      {
+        TermFreqVector [] vector = searcher.reader.getTermFreqVectors(hits[i].doc);
+        assertTrue(vector != null);
+        assertTrue(vector.length == 1);
+        
+        //assertTrue();
+      }
+    } catch (IOException e) {
+      assertTrue(false);
+    }
+  }
+
+  public void testKnownSetOfDocuments() throws IOException {
+    String test1 = "eating chocolate in a computer lab"; //6 terms
+    String test2 = "computer in a computer lab"; //5 terms
+    String test3 = "a chocolate lab grows old"; //5 terms
+    String test4 = "eating chocolate with a chocolate lab in an old chocolate colored computer lab"; //13 terms
+    Map<String,Integer> test4Map = new HashMap<String,Integer>();
+    test4Map.put("chocolate", Integer.valueOf(3));
+    test4Map.put("lab", Integer.valueOf(2));
+    test4Map.put("eating", Integer.valueOf(1));
+    test4Map.put("computer", Integer.valueOf(1));
+    test4Map.put("with", Integer.valueOf(1));
+    test4Map.put("a", Integer.valueOf(1));
+    test4Map.put("colored", Integer.valueOf(1));
+    test4Map.put("in", Integer.valueOf(1));
+    test4Map.put("an", Integer.valueOf(1));
+    test4Map.put("old", Integer.valueOf(1));
+    
+    Document testDoc1 = new Document();
+    setupDoc(testDoc1, test1);
+    Document testDoc2 = new Document();
+    setupDoc(testDoc2, test2);
+    Document testDoc3 = new Document();
+    setupDoc(testDoc3, test3);
+    Document testDoc4 = new Document();
+    setupDoc(testDoc4, test4);
+    
+    Directory dir = newDirectory();
+    
+    RandomIndexWriter writer = new RandomIndexWriter(random, dir, 
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.SIMPLE, true))
+                                                     .setOpenMode(OpenMode.CREATE).setMergePolicy(newLogMergePolicy()));
+    writer.addDocument(testDoc1);
+    writer.addDocument(testDoc2);
+    writer.addDocument(testDoc3);
+    writer.addDocument(testDoc4);
+    IndexReader reader = writer.getReader();
+    writer.close();
+    IndexSearcher knownSearcher = newSearcher(reader);
+    TermEnum termEnum = knownSearcher.reader.terms();
+    TermDocs termDocs = knownSearcher.reader.termDocs();
+    //System.out.println("Terms: " + termEnum.size() + " Orig Len: " + termArray.length);
+    
+    //Similarity sim = knownSearcher.getSimilarity();
+    while (termEnum.next() == true)
+    {
+      Term term = termEnum.term();
+      //System.out.println("Term: " + term);
+      termDocs.seek(term);
+      while (termDocs.next())
+      {
+        int docId = termDocs.doc();
+        int freq = termDocs.freq();
+        //System.out.println("Doc Id: " + docId + " freq " + freq);
+        TermFreqVector vector = knownSearcher.reader.getTermFreqVector(docId, "field");
+        //float tf = sim.tf(freq);
+        //float idf = sim.idf(knownSearcher.docFreq(term), knownSearcher.maxDoc());
+        //float qNorm = sim.queryNorm()
+        //This is fine since we don't have stop words
+        //float lNorm = sim.lengthNorm("field", vector.getTerms().length);
+        //float coord = sim.coord()
+        //System.out.println("TF: " + tf + " IDF: " + idf + " LenNorm: " + lNorm);
+        assertTrue(vector != null);
+        String[] vTerms = vector.getTerms();
+        int [] freqs = vector.getTermFrequencies();
+        for (int i = 0; i < vTerms.length; i++)
+        {
+          if (term.text().equals(vTerms[i]))
+          {
+            assertTrue(freqs[i] == freq);
+          }
+        }
+        
+      }
+      //System.out.println("--------");
+    }
+    Query query = new TermQuery(new Term("field", "chocolate"));
+    ScoreDoc[] hits = knownSearcher.search(query, null, 1000).scoreDocs;
+    //doc 3 should be the first hit b/c it is the shortest match
+    assertTrue(hits.length == 3);
+    /*System.out.println("Hit 0: " + hits.id(0) + " Score: " + hits.score(0) + " String: " + hits.doc(0).toString());
+      System.out.println("Explain: " + knownSearcher.explain(query, hits.id(0)));
+      System.out.println("Hit 1: " + hits.id(1) + " Score: " + hits.score(1) + " String: " + hits.doc(1).toString());
+      System.out.println("Explain: " + knownSearcher.explain(query, hits.id(1)));
+      System.out.println("Hit 2: " + hits.id(2) + " Score: " + hits.score(2) + " String: " +  hits.doc(2).toString());
+      System.out.println("Explain: " + knownSearcher.explain(query, hits.id(2)));*/
+    assertTrue(hits[0].doc == 2);
+    assertTrue(hits[1].doc == 3);
+    assertTrue(hits[2].doc == 0);
+    TermFreqVector vector = knownSearcher.reader.getTermFreqVector(hits[1].doc, "field");
+    assertTrue(vector != null);
+    //System.out.println("Vector: " + vector);
+    String[] terms = vector.getTerms();
+    int [] freqs = vector.getTermFrequencies();
+    assertTrue(terms != null && terms.length == 10);
+    for (int i = 0; i < terms.length; i++) {
+      String term = terms[i];
+      //System.out.println("Term: " + term);
+      int freq = freqs[i];
+      assertTrue(test4.indexOf(term) != -1);
+      Integer freqInt = test4Map.get(term);
+      assertTrue(freqInt != null);
+      assertTrue(freqInt.intValue() == freq);        
+    }
+    SortedTermVectorMapper mapper = new SortedTermVectorMapper(new TermVectorEntryFreqSortedComparator());
+    knownSearcher.reader.getTermFreqVector(hits[1].doc, mapper);
+    SortedSet<TermVectorEntry> vectorEntrySet = mapper.getTermVectorEntrySet();
+    assertTrue("mapper.getTermVectorEntrySet() Size: " + vectorEntrySet.size() + " is not: " + 10, vectorEntrySet.size() == 10);
+    TermVectorEntry last = null;
+    for (final TermVectorEntry tve : vectorEntrySet) {
+      if (tve != null && last != null)
+      {
+        assertTrue("terms are not properly sorted", last.getFrequency() >= tve.getFrequency());
+        Integer expectedFreq =  test4Map.get(tve.getTerm());
+        //we expect double the expectedFreq, since there are two fields with the exact same text and we are collapsing all fields
+        assertTrue("Frequency is not correct:", tve.getFrequency() == 2*expectedFreq.intValue());
+      }
+      last = tve;
+      
+    }
+    
+    FieldSortedTermVectorMapper fieldMapper = new FieldSortedTermVectorMapper(new TermVectorEntryFreqSortedComparator());
+    knownSearcher.reader.getTermFreqVector(hits[1].doc, fieldMapper);
+    Map<String,SortedSet<TermVectorEntry>> map = fieldMapper.getFieldToTerms();
+    assertTrue("map Size: " + map.size() + " is not: " + 2, map.size() == 2);
+    vectorEntrySet = map.get("field");
+    assertTrue("vectorEntrySet is null and it shouldn't be", vectorEntrySet != null);
+    assertTrue("vectorEntrySet Size: " + vectorEntrySet.size() + " is not: " + 10, vectorEntrySet.size() == 10);
+    knownSearcher.close();
+    reader.close();
+    dir.close();
+  } 
+  
+  private void setupDoc(Document doc, String text)
+  {
+    doc.add(new Field("field2", text, Field.Store.YES,
+        Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+    doc.add(new Field("field", text, Field.Store.YES,
+        Field.Index.ANALYZED, Field.TermVector.YES));
+    //System.out.println("Document: " + doc);
+  }
+
+  // Test only a few docs having vectors
+  public void testRareVectors() throws IOException {
+    RandomIndexWriter writer = new RandomIndexWriter(random, directory, 
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.SIMPLE, true))
+        .setOpenMode(OpenMode.CREATE));
+    writer.w.setInfoStream(VERBOSE ? System.out : null);
+    if (VERBOSE) {
+      System.out.println("TEST: now add non-vectors");
+    }
+    for (int i = 0; i < 100; i++) {
+      Document doc = new Document();
+      doc.add(new Field("field", English.intToEnglish(i),
+                        Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO));
+      writer.addDocument(doc);
+    }
+    if (VERBOSE) {
+      System.out.println("TEST: now add vectors");
+    }
+    for(int i=0;i<10;i++) {
+      Document doc = new Document();
+      doc.add(new Field("field", English.intToEnglish(100+i),
+                        Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+      writer.addDocument(doc);
+    }
+
+    if (VERBOSE) {
+      System.out.println("TEST: now getReader");
+    }
+    IndexReader reader = writer.getReader();
+    writer.close();
+    searcher = newSearcher(reader);
+
+    Query query = new TermQuery(new Term("field", "hundred"));
+    ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals(10, hits.length);
+    for (int i = 0; i < hits.length; i++) {
+
+      TermFreqVector [] vector = searcher.reader.getTermFreqVectors(hits[i].doc);
+      assertTrue(vector != null);
+      assertTrue(vector.length == 1);
+    }
+    reader.close();
+  }
+
+
+  // In a single doc, for the same field, mix the term
+  // vectors up
+  public void testMixedVectrosVectors() throws IOException {
+    RandomIndexWriter writer = new RandomIndexWriter(random, directory, 
+        newIndexWriterConfig(TEST_VERSION_CURRENT, 
+        new MockAnalyzer(random, MockTokenizer.SIMPLE, true)).setOpenMode(OpenMode.CREATE));
+    Document doc = new Document();
+    doc.add(new Field("field", "one",
+                      Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO));
+    doc.add(new Field("field", "one",
+                      Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.YES));
+    doc.add(new Field("field", "one",
+                      Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS));
+    doc.add(new Field("field", "one",
+                      Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_OFFSETS));
+    doc.add(new Field("field", "one",
+                      Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+    writer.addDocument(doc);
+    IndexReader reader = writer.getReader();
+    writer.close();
+
+    searcher = newSearcher(reader);
+
+    Query query = new TermQuery(new Term("field", "one"));
+    ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals(1, hits.length);
+
+    TermFreqVector [] vector = searcher.reader.getTermFreqVectors(hits[0].doc);
+    assertTrue(vector != null);
+    assertTrue(vector.length == 1);
+    TermPositionVector tfv = (TermPositionVector) vector[0];
+    assertTrue(tfv.getField().equals("field"));
+    String[] terms = tfv.getTerms();
+    assertEquals(1, terms.length);
+    assertEquals(terms[0], "one");
+    assertEquals(5, tfv.getTermFrequencies()[0]);
+
+    int[] positions = tfv.getTermPositions(0);
+    assertEquals(5, positions.length);
+    for(int i=0;i<5;i++)
+      assertEquals(i, positions[i]);
+    TermVectorOffsetInfo[] offsets = tfv.getOffsets(0);
+    assertEquals(5, offsets.length);
+    for(int i=0;i<5;i++) {
+      assertEquals(4*i, offsets[i].getStartOffset());
+      assertEquals(4*i+3, offsets[i].getEndOffset());
+    }
+    reader.close();
+  }
+
+  private IndexWriter createWriter(Directory dir) throws IOException {
+    return new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT,
+        new MockAnalyzer(random)).setMaxBufferedDocs(2));
+  }
+
+  private void createDir(Directory dir) throws IOException {
+    IndexWriter writer = createWriter(dir);
+    writer.addDocument(createDoc());
+    writer.close();
+  }
+
+  private Document createDoc() {
+    Document doc = new Document();
+    doc.add(new Field("c", "aaa", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
+    return doc;
+  }
+
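+  // verifyIndex checks that every document still carries term vectors for field "c"
+  // after the optimize()/addIndexes() operations below.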
+  private void verifyIndex(Directory dir) throws IOException {
+    IndexReader r = IndexReader.open(dir);
+    int numDocs = r.numDocs();
+    for (int i = 0; i < numDocs; i++) {
+      TermFreqVector tfv = r.getTermFreqVector(i, "c");
+      assertNotNull("term vectors should not have been null for document " + i, tfv);
+    }
+    r.close();
+  }
+  
+  public void testOptimizeAddDocs() throws Exception {
+    Directory target = newDirectory();
+    IndexWriter writer = createWriter(target);
+    // with maxBufferedDocs=2, this results in two segments, so that optimize
+    // actually does something.
+    for (int i = 0; i < 4; i++) {
+      writer.addDocument(createDoc());
+    }
+    writer.optimize();
+    writer.close();
+    
+    verifyIndex(target);
+    target.close();
+  }
+
+  public void testOptimizeAddIndexesDir() throws Exception {
+    Directory[] input = new Directory[] { newDirectory(), newDirectory() };
+    Directory target = newDirectory();
+    
+    for (Directory dir : input) {
+      createDir(dir);
+    }
+    
+    IndexWriter writer = createWriter(target);
+    writer.addIndexes(input);
+    writer.optimize();
+    writer.close();
+
+    verifyIndex(target);
+
+    IOUtils.close(target, input[0], input[1]);
+  }
+  
+  public void testOptimizeAddIndexesReader() throws Exception {
+    Directory[] input = new Directory[] { newDirectory(), newDirectory() };
+    Directory target = newDirectory();
+    
+    for (Directory dir : input) {
+      createDir(dir);
+    }
+    
+    IndexWriter writer = createWriter(target);
+    for (Directory dir : input) {
+      IndexReader r = IndexReader.open(dir);
+      writer.addIndexes(r);
+      r.close();
+    }
+    writer.optimize();
+    writer.close();
+    
+    verifyIndex(target);
+    IOUtils.close(target, input[0], input[1]);
+  }
+  
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/TestThreadSafe.java b/lucene/backwards/src/test/org/apache/lucene/search/TestThreadSafe.java
new file mode 100755
index 0000000..110bb94
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/TestThreadSafe.java
@@ -0,0 +1,154 @@
+package org.apache.lucene.search;
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.*;
+
+import java.util.Random;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.io.IOException;
+
+public class TestThreadSafe extends LuceneTestCase {
+  Directory dir1;
+
+  IndexReader ir1;
+
+  class Thr extends Thread {
+    final int iter;
+    final Random rand;
+    final AtomicBoolean failed;
+
+    // pass in random in case we want to make things reproducible
+    public Thr(int iter, Random rand, AtomicBoolean failed) {
+      this.iter = iter;
+      this.rand = rand;
+      this.failed = failed;
+    }
+
+    @Override
+    public void run() {
+      try {
+        for (int i=0; i<iter; i++) {
+          /*** future
+           // pick a random index reader... a shared one, or create your own
+           IndexReader ir;
+           ***/
+
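+          // Note: rand.nextInt(1) always returns 0 today, so only the shared reader path runs;
+          // more cases are intended once the "future" block above is enabled.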
+          switch(rand.nextInt(1)) {
+            case 0: loadDoc(ir1); break;
+          }
+
+        }
+      } catch (Throwable th) {
+        failed.set(true);
+        throw new RuntimeException(th);
+      }
+    }
+
+
+    void loadDoc(IndexReader ir) throws IOException {
+      // beware of deleted docs in the future
+      Document doc = ir.document(rand.nextInt(ir.maxDoc()),
+                new FieldSelector() {
+                  public FieldSelectorResult accept(String fieldName) {
+                    switch(rand.nextInt(2)) {
+                      case 0: return FieldSelectorResult.LAZY_LOAD;
+                      case 1: return FieldSelectorResult.LOAD;
+                      // TODO: add other options
+                      default: return FieldSelectorResult.LOAD;
+                    }
+                  }
+                }
+              );
+
+      List<Fieldable> fields = doc.getFields();
+      for (final Fieldable f : fields ) {
+        validateField(f);
+      }
+
+    }
+
+  }
+
+
+  void validateField(Fieldable f) {
+    String val = f.stringValue();
+    if (!val.startsWith("^") || !val.endsWith("$")) {
+      throw new RuntimeException("Invalid field:" + f.toString() + " val=" +val);
+    }
+  }
+
+  String[] words = "now is the time for all good men to come to the aid of their country".split(" ");
+
+  void buildDir(Directory dir, int nDocs, int maxFields, int maxFieldLen) throws IOException {
+    IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(
+        TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(10));
+    for (int j=0; j<nDocs; j++) {
+      Document d = new Document();
+      int nFields = random.nextInt(maxFields);
+      for (int i=0; i<nFields; i++) {
+        int flen = random.nextInt(maxFieldLen);
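+        // bracket each value with "^ ... $" so validateField can check the stored value survived intact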
+        StringBuilder sb = new StringBuilder("^ ");
+        while (sb.length() < flen) sb.append(' ').append(words[random.nextInt(words.length)]);
+        sb.append(" $");
+        Field.Store store = Field.Store.YES;  // make random later
+        Field.Index index = Field.Index.ANALYZED;  // make random later
+        d.add(newField("f"+i, sb.toString(), store, index));
+      }
+      iw.addDocument(d);
+    }
+    iw.close();
+  }
+
+
+  void doTest(int iter, int nThreads) throws Exception {
+    Thr[] tarr = new Thr[nThreads];
+    AtomicBoolean failed = new AtomicBoolean();
+    for (int i=0; i<nThreads; i++) {
+      tarr[i] = new Thr(iter, new Random(random.nextLong()), failed);
+      tarr[i].start();
+    }
+    for (int i=0; i<nThreads; i++) {
+      tarr[i].join();
+    }
+    assertFalse(failed.get());
+  }
+
+  public void testLazyLoadThreadSafety() throws Exception{
+    dir1 = newDirectory();
+    // test w/ field sizes bigger than the buffer of an index input
+    buildDir(dir1, 15, 5, 2000);
+
+    // do many small tests so the thread locals go away in between
+    int num = atLeast(10);
+    for (int i = 0; i < num; i++) {
+      ir1 = IndexReader.open(dir1, false);
+      doTest(10,10);
+      ir1.close();
+    }
+    dir1.close();
+  }
+
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/TestTimeLimitingCollector.java b/lucene/backwards/src/test/org/apache/lucene/search/TestTimeLimitingCollector.java
new file mode 100644
index 0000000..b459410
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/TestTimeLimitingCollector.java
@@ -0,0 +1,351 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.BitSet;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.queryParser.QueryParser;
+import org.apache.lucene.search.TimeLimitingCollector.TimeExceededException;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.ThreadInterruptedException;
+
+/**
+ * Tests the {@link TimeLimitingCollector}.  This test checks (1) search
+ * correctness (regardless of timeout), (2) expected timeout behavior,
+ * and (3) a sanity test with multiple searching threads.
+ */
+public class TestTimeLimitingCollector extends LuceneTestCase {
+  private static final int SLOW_DOWN = 3;
+  private static final long TIME_ALLOWED = 17 * SLOW_DOWN; // so searches can find about 17 docs.
+  
+  // max time allowed is relaxed for multithreading tests. 
+  // the multithread case fails when setting this to 1 (no slack) and launching many threads (>2000).  
+  // but this is not a real failure, just noise.
+  private static final double MULTI_THREAD_SLACK = 7;      
+            
+  private static final int N_DOCS = 3000;
+  private static final int N_THREADS = 50;
+
+  private Searcher searcher;
+  private Directory directory;
+  private IndexReader reader;
+
+  private final String FIELD_NAME = "body";
+  private Query query;
+
+  /**
+   * initializes searcher with a document set
+   */
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    final String docText[] = {
+        "docThatNeverMatchesSoWeCanRequireLastDocCollectedToBeGreaterThanZero",
+        "one blah three",
+        "one foo three multiOne",
+        "one foobar three multiThree",
+        "blueberry pancakes",
+        "blueberry pie",
+        "blueberry strudel",
+        "blueberry pizza",
+    };
+    directory = newDirectory();
+    RandomIndexWriter iw = new RandomIndexWriter(random, directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
+    
+    for (int i=0; i<N_DOCS; i++) {
+      add(docText[i%docText.length], iw);
+    }
+    reader = iw.getReader();
+    iw.close();
+    searcher = newSearcher(reader);
+
+    String qtxt = "one";
+    // start from 1, so that the 0th doc never matches
+    for (int i = 1; i < docText.length; i++) {
+      qtxt += ' ' + docText[i]; // large query so that search will be longer
+    }
+    QueryParser queryParser = new QueryParser(TEST_VERSION_CURRENT, FIELD_NAME, new MockAnalyzer(random));
+    query = queryParser.parse(qtxt);
+    
+    // warm the searcher
+    searcher.search(query, null, 1000);
+
+  }
+
+  @Override
+  public void tearDown() throws Exception {
+    searcher.close();
+    reader.close();
+    directory.close();
+    super.tearDown();
+  }
+
+  private void add(String value, RandomIndexWriter iw) throws IOException {
+    Document d = new Document();
+    d.add(newField(FIELD_NAME, value, Field.Store.NO, Field.Index.ANALYZED));
+    iw.addDocument(d);
+  }
+
+  private void search(Collector collector) throws Exception {
+    searcher.search(query, collector);
+  }
+
+  /**
+   * test search correctness with no timeout
+   */
+  public void testSearch() {
+    doTestSearch();
+  }
+  
+  private void doTestSearch() {
+    int totalResults = 0;
+    int totalTLCResults = 0;
+    try {
+      MyHitCollector myHc = new MyHitCollector();
+      search(myHc);
+      totalResults = myHc.hitCount();
+      
+      myHc = new MyHitCollector();
+      long oneHour = 3600000;
+      Collector tlCollector = createTimedCollector(myHc, oneHour, false);
+      search(tlCollector);
+      totalTLCResults = myHc.hitCount();
+    } catch (Exception e) {
+      e.printStackTrace();
+      assertTrue("Unexpected exception: "+e, false); //==fail
+    }
+    assertEquals( "Wrong number of results!", totalResults, totalTLCResults );
+  }
+
+  private Collector createTimedCollector(MyHitCollector hc, long timeAllowed, boolean greedy) {
+    TimeLimitingCollector res = new TimeLimitingCollector(hc, timeAllowed);
+    res.setGreedy(greedy); // set to true to make sure at least one doc is collected.
+    return res;
+  }
+
+  /**
+   * Test that timeout is obtained, and soon enough!
+   */
+  public void testTimeoutGreedy() {
+    doTestTimeout(false, true);
+  }
+  
+  /**
+   * Test that timeout is obtained, and soon enough!
+   */
+  public void testTimeoutNotGreedy() {
+    doTestTimeout(false, false);
+  }
+
+  private void doTestTimeout(boolean multiThreaded, boolean greedy) {
+    // setup
+    MyHitCollector myHc = new MyHitCollector();
+    myHc.setSlowDown(SLOW_DOWN);
+    Collector tlCollector = createTimedCollector(myHc, TIME_ALLOWED, greedy);
+
+    // search
+    TimeExceededException timeoutException = null;
+    try {
+      search(tlCollector);
+    } catch (TimeExceededException x) {
+      timeoutException = x;
+    } catch (Exception e) {
+      assertTrue("Unexpected exception: "+e, false); //==fail
+    }
+    
+    // must get exception
+    assertNotNull( "Timeout expected!", timeoutException );
+
+    // greediness affects last doc collected
+    int exceptionDoc = timeoutException.getLastDocCollected();
+    int lastCollected = myHc.getLastDocCollected(); 
+    assertTrue( "doc collected at timeout must be > 0!", exceptionDoc > 0 );
+    if (greedy) {
+      assertTrue("greedy="+greedy+" exceptionDoc="+exceptionDoc+" != lastCollected="+lastCollected, exceptionDoc==lastCollected);
+      assertTrue("greedy, but no hits found!", myHc.hitCount() > 0 );
+    } else {
+      assertTrue("greedy="+greedy+" exceptionDoc="+exceptionDoc+" not > lastCollected="+lastCollected, exceptionDoc>lastCollected);
+    }
+
+    // verify that elapsed time at exception is within valid limits
+    assertEquals( timeoutException.getTimeAllowed(), TIME_ALLOWED);
+    // a) Not too early
+    assertTrue ( "elapsed="+timeoutException.getTimeElapsed()+" <= (allowed-resolution)="+(TIME_ALLOWED-TimeLimitingCollector.getResolution()),
+        timeoutException.getTimeElapsed() > TIME_ALLOWED-TimeLimitingCollector.getResolution());
+    // b) Not too late.
+    //    This part is problematic in a busy test system, so we just print a warning.
+    //    We already verified that a timeout occurred; we just can't be picky about how long it took.
+    if (timeoutException.getTimeElapsed() > maxTime(multiThreaded)) {
+      System.out.println("Informative: timeout exceeded (no action required: most probably just " +
+        "because the test machine is slower than usual): " +
+        "lastDoc=" + exceptionDoc +
+        ", allowed=" + timeoutException.getTimeAllowed() +
+        ", elapsed=" + timeoutException.getTimeElapsed() +
+        " >= " + maxTimeStr(multiThreaded));
+    }
+  }
+
+  private long maxTime(boolean multiThreaded) {
+    long res = 2 * TimeLimitingCollector.getResolution() + TIME_ALLOWED + SLOW_DOWN; // some slack for less noise in this test
+    if (multiThreaded) {
+      res *= MULTI_THREAD_SLACK; // larger slack  
+    }
+    return res;
+  }
+
+  private String maxTimeStr(boolean multiThreaded) {
+    String s =
+      "( " +
+      "2*resolution +  TIME_ALLOWED + SLOW_DOWN = " +
+      "2*" + TimeLimitingCollector.getResolution() + " + " + TIME_ALLOWED + " + " + SLOW_DOWN +
+      ")";
+    if (multiThreaded) {
+      s = MULTI_THREAD_SLACK + " * "+s;  
+    }
+    return maxTime(multiThreaded) + " = " + s;
+  }
+
+  /**
+   * Test timeout behavior when resolution is modified. 
+   */
+  public void testModifyResolution() {
+    try {
+      // increase and test
+      long resolution = 20 * TimeLimitingCollector.DEFAULT_RESOLUTION; //400
+      TimeLimitingCollector.setResolution(resolution);
+      assertEquals(resolution, TimeLimitingCollector.getResolution());
+      doTestTimeout(false,true);
+      // decrease much and test
+      resolution = 5;
+      TimeLimitingCollector.setResolution(resolution);
+      assertEquals(resolution, TimeLimitingCollector.getResolution());
+      doTestTimeout(false,true);
+      // return to default and test
+      resolution = TimeLimitingCollector.DEFAULT_RESOLUTION;
+      TimeLimitingCollector.setResolution(resolution);
+      assertEquals(resolution, TimeLimitingCollector.getResolution());
+      doTestTimeout(false,true);
+    } finally {
+      TimeLimitingCollector.setResolution(TimeLimitingCollector.DEFAULT_RESOLUTION);
+    }
+  }
+  
+  /** 
+   * Test correctness with multiple searching threads.
+   */
+  public void testSearchMultiThreaded() throws Exception {
+    doTestMultiThreads(false);
+  }
+
+  /** 
+   * Test correctness with multiple searching threads.
+   */
+  public void testTimeoutMultiThreaded() throws Exception {
+    doTestMultiThreads(true);
+  }
+  
+  private void doTestMultiThreads(final boolean withTimeout) throws Exception {
+    Thread [] threadArray = new Thread[N_THREADS];
+    final BitSet success = new BitSet(N_THREADS);
+    for( int i = 0; i < threadArray.length; ++i ) {
+      final int num = i;
+      threadArray[num] = new Thread() {
+          @Override
+          public void run() {
+            if (withTimeout) {
+              doTestTimeout(true,true);
+            } else {
+              doTestSearch();
+            }
+            synchronized(success) {
+              success.set(num);
+            }
+          }
+      };
+    }
+    for( int i = 0; i < threadArray.length; ++i ) {
+      threadArray[i].start();
+    }
+    for( int i = 0; i < threadArray.length; ++i ) {
+      threadArray[i].join();
+    }
+    assertEquals("some threads failed!", N_THREADS,success.cardinality());
+  }
+  
+  // counting collector that can slow down at collect().
+  private class MyHitCollector extends Collector {
+    private final BitSet bits = new BitSet();
+    private int slowdown = 0;
+    private int lastDocCollected = -1;
+    private int docBase = 0;
+
+    /**
+     * amount of time to wait on each collect to simulate a long iteration
+     */
+    public void setSlowDown( int milliseconds ) {
+      slowdown = milliseconds;
+    }
+    
+    public int hitCount() {
+      return bits.cardinality();
+    }
+
+    public int getLastDocCollected() {
+      return lastDocCollected;
+    }
+
+    @Override
+    public void setScorer(Scorer scorer) throws IOException {
+      // scorer is not needed
+    }
+    
+    @Override
+    public void collect(final int doc) throws IOException {
+      int docId = doc + docBase;
+      if( slowdown > 0 ) {
+        try {
+          Thread.sleep(slowdown);
+        } catch (InterruptedException ie) {
+          throw new ThreadInterruptedException(ie);
+        }
+      }
+      assert docId >= 0: " base=" + docBase + " doc=" + doc;
+      bits.set( docId );
+      lastDocCollected = docId;
+    }
+    
+    @Override
+    public void setNextReader(IndexReader reader, int base) {
+      docBase = base;
+    }
+    
+    @Override
+    public boolean acceptsDocsOutOfOrder() {
+      return false;
+    }
+
+  }
+
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/TestTopDocsCollector.java b/lucene/backwards/src/test/org/apache/lucene/search/TestTopDocsCollector.java
new file mode 100644
index 0000000..c1e7ab9
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/TestTopDocsCollector.java
@@ -0,0 +1,211 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+
+public class TestTopDocsCollector extends LuceneTestCase {
+
+  private static final class MyTopsDocCollector extends TopDocsCollector<ScoreDoc> {
+
+    private int idx = 0;
+    private int base = 0;
+    
+    public MyTopsDocCollector(int size) {
+      super(new HitQueue(size, false));
+    }
+    
+    @Override
+    protected TopDocs newTopDocs(ScoreDoc[] results, int start) {
+      if (results == null) {
+        return EMPTY_TOPDOCS;
+      }
+      
+      float maxScore = Float.NaN;
+      if (start == 0) {
+        maxScore = results[0].score;
+      } else {
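+        // pop all but the highest-scoring entry; the last element left in the queue yields the global max score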
+        for (int i = pq.size(); i > 1; i--) { pq.pop(); }
+        maxScore = pq.pop().score;
+      }
+      
+      return new TopDocs(totalHits, results, maxScore);
+    }
+    
+    @Override
+    public void collect(int doc) throws IOException {
+      ++totalHits;
+      pq.insertWithOverflow(new ScoreDoc(doc + base, scores[idx++]));
+    }
+
+    @Override
+    public void setNextReader(IndexReader reader, int docBase)
+        throws IOException {
+      base = docBase;
+    }
+
+    @Override
+    public void setScorer(Scorer scorer) throws IOException {
+      // Don't do anything; scores are taken from the static scores array instead.
+    }
+    
+    @Override
+    public boolean acceptsDocsOutOfOrder() {
+      return true;
+    }
+
+  }
+
+  // Scores array to be used by MyTopsDocCollector. If it is changed, MAX_SCORE
+  // must also change.
+  private static final float[] scores = new float[] {
+    0.7767749f, 1.7839992f, 8.9925785f, 7.9608946f, 0.07948637f, 2.6356435f, 
+    7.4950366f, 7.1490803f, 8.108544f, 4.961808f, 2.2423935f, 7.285586f, 4.6699767f,
+    2.9655676f, 6.953706f, 5.383931f, 6.9916306f, 8.365894f, 7.888485f, 8.723962f,
+    3.1796896f, 0.39971232f, 1.3077754f, 6.8489285f, 9.17561f, 5.060466f, 7.9793315f,
+    8.601509f, 4.1858315f, 0.28146625f
+  };
+  
+  private static final float MAX_SCORE = 9.17561f;
+  
+  private Directory dir;
+  private IndexReader reader;
+
+  private TopDocsCollector<ScoreDoc> doSearch(int numResults) throws IOException {
+    Query q = new MatchAllDocsQuery();
+    IndexSearcher searcher = newSearcher(reader);
+    TopDocsCollector<ScoreDoc> tdc = new MyTopsDocCollector(numResults);
+    searcher.search(q, tdc);
+    searcher.close();
+    return tdc;
+  }
+  
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    
+    // populate an index with 30 documents; this should be enough for the test.
+    // The documents have no content - the test uses MatchAllDocsQuery().
+    dir = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random, dir);
+    for (int i = 0; i < 30; i++) {
+      writer.addDocument(new Document());
+    }
+    reader = writer.getReader();
+    writer.close();
+  }
+  
+  @Override
+  public void tearDown() throws Exception {
+    reader.close();
+    dir.close();
+    dir = null;
+    super.tearDown();
+  }
+  
+  public void testInvalidArguments() throws Exception {
+    int numResults = 5;
+    TopDocsCollector<ScoreDoc> tdc = doSearch(numResults);
+    
+    // start < 0
+    assertEquals(0, tdc.topDocs(-1).scoreDocs.length);
+    
+    // start > pq.size()
+    assertEquals(0, tdc.topDocs(numResults + 1).scoreDocs.length);
+    
+    // start == pq.size()
+    assertEquals(0, tdc.topDocs(numResults).scoreDocs.length);
+    
+    // howMany < 0
+    assertEquals(0, tdc.topDocs(0, -1).scoreDocs.length);
+    
+    // howMany == 0
+    assertEquals(0, tdc.topDocs(0, 0).scoreDocs.length);
+    
+  }
+  
+  public void testZeroResults() throws Exception {
+    TopDocsCollector<ScoreDoc> tdc = new MyTopsDocCollector(5);
+    assertEquals(0, tdc.topDocs(0, 1).scoreDocs.length);
+  }
+  
+  public void testFirstResultsPage() throws Exception {
+    TopDocsCollector<ScoreDoc> tdc = doSearch(15);
+    assertEquals(10, tdc.topDocs(0, 10).scoreDocs.length);
+  }
+  
+  public void testSecondResultsPages() throws Exception {
+    TopDocsCollector<ScoreDoc> tdc = doSearch(15);
+    // ask for more results than are available
+    assertEquals(5, tdc.topDocs(10, 10).scoreDocs.length);
+    
+    // ask for 5 results (exactly as many as there should be)
+    tdc = doSearch(15);
+    assertEquals(5, tdc.topDocs(10, 5).scoreDocs.length);
+    
+    // ask for fewer results than there are
+    tdc = doSearch(15);
+    assertEquals(4, tdc.topDocs(10, 4).scoreDocs.length);
+  }
+  
+  public void testGetAllResults() throws Exception {
+    TopDocsCollector<ScoreDoc> tdc = doSearch(15);
+    assertEquals(15, tdc.topDocs().scoreDocs.length);
+  }
+  
+  public void testGetResultsFromStart() throws Exception {
+    TopDocsCollector<ScoreDoc> tdc = doSearch(15);
+    // should return all results
+    assertEquals(15, tdc.topDocs(0).scoreDocs.length);
+    
+    tdc = doSearch(15);
+    // get the last 5 only.
+    assertEquals(5, tdc.topDocs(10).scoreDocs.length);
+  }
+  
+  public void testMaxScore() throws Exception {
+    // ask for all results
+    TopDocsCollector<ScoreDoc> tdc = doSearch(15);
+    TopDocs td = tdc.topDocs();
+    assertEquals(MAX_SCORE, td.getMaxScore(), 0f);
+    
+    // ask for 5 last results
+    tdc = doSearch(15);
+    td = tdc.topDocs(10);
+    assertEquals(MAX_SCORE, td.getMaxScore(), 0f);
+  }
+  
+  // This does not test the PQ's correctness, but whether topDocs()
+  // implementations return the results in decreasing score order.
+  public void testResultsOrder() throws Exception {
+    TopDocsCollector<ScoreDoc> tdc = doSearch(15);
+    ScoreDoc[] sd = tdc.topDocs().scoreDocs;
+    
+    assertEquals(MAX_SCORE, sd[0].score, 0f);
+    for (int i = 1; i < sd.length; i++) {
+      assertTrue(sd[i - 1].score >= sd[i].score);
+    }
+  }
+  
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/TestTopDocsMerge.java b/lucene/backwards/src/test/org/apache/lucene/search/TestTopDocsMerge.java
new file mode 100644
index 0000000..63cc50b
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/TestTopDocsMerge.java
@@ -0,0 +1,273 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.NumericField;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.ReaderUtil;
+import org.apache.lucene.util._TestUtil;
+
+public class TestTopDocsMerge extends LuceneTestCase {
+
+  private static class ShardSearcher {
+    private final IndexSearcher subSearcher;
+
+    public ShardSearcher(IndexReader subReader) {
+      this.subSearcher = new IndexSearcher(subReader);
+    }
+
+    public void search(Weight weight, Collector collector) throws IOException {
+      subSearcher.search(weight, null, collector);
+    }
+
+    public TopDocs search(Weight weight, int topN) throws IOException {
+      return subSearcher.search(weight, null, topN);
+    }
+
+    @Override
+    public String toString() {
+      return "ShardSearcher(" + subSearcher + ")";
+    }
+  }
+
+  public void testSort() throws Exception {
+
+    IndexReader reader = null;
+    Directory dir = null;
+
+    final int numDocs = atLeast(1000);
+    //final int numDocs = atLeast(50);
+
+    final String[] tokens = new String[] {"a", "b", "c", "d", "e"};
+
+    if (VERBOSE) {
+      System.out.println("TEST: make index");
+    }
+
+    {
+      dir = newDirectory();
+      final RandomIndexWriter w = new RandomIndexWriter(random, dir);
+      // w.setDoRandomOptimize(false);
+
+      // w.w.getConfig().setMaxBufferedDocs(atLeast(100));
+
+      final String[] content = new String[atLeast(20)];
+
+      for(int contentIDX=0;contentIDX<content.length;contentIDX++) {
+        final StringBuilder sb = new StringBuilder();
+        final int numTokens = _TestUtil.nextInt(random, 1, 10);
+        for(int tokenIDX=0;tokenIDX<numTokens;tokenIDX++) {
+          sb.append(tokens[random.nextInt(tokens.length)]).append(' ');
+        }
+        content[contentIDX] = sb.toString();
+      }
+
+      for(int docIDX=0;docIDX<numDocs;docIDX++) {
+        final Document doc = new Document();
+        doc.add(newField("string", _TestUtil.randomRealisticUnicodeString(random), Field.Index.NOT_ANALYZED));
+        doc.add(newField("text", content[random.nextInt(content.length)], Field.Index.ANALYZED));
+        doc.add(new NumericField("float").setFloatValue(random.nextFloat()));
+        final int intValue;
+        if (random.nextInt(100) == 17) {
+          intValue = Integer.MIN_VALUE;
+        } else if (random.nextInt(100) == 17) {
+          intValue = Integer.MAX_VALUE;
+        } else {
+          intValue = random.nextInt();
+        }
+        doc.add(new NumericField("int").setIntValue(intValue));
+        if (VERBOSE) {
+          System.out.println("  doc=" + doc);
+        }
+        w.addDocument(doc);
+      }
+
+      reader = w.getReader();
+      w.close();
+    }
+
+    // NOTE: sometimes reader has just one segment, which is
+    // important to test
+    final IndexSearcher searcher = newSearcher(reader);
+    IndexReader[] subReaders = searcher.getIndexReader().getSequentialSubReaders();
+    if (subReaders == null) {
+      subReaders = new IndexReader[] {searcher.getIndexReader()};
+    }
+    final ShardSearcher[] subSearchers = new ShardSearcher[subReaders.length];
+
+    for(int searcherIDX=0;searcherIDX<subSearchers.length;searcherIDX++) { 
+      subSearchers[searcherIDX] = new ShardSearcher(subReaders[searcherIDX]);
+    }
+
+    final List<SortField> sortFields = new ArrayList<SortField>();
+    sortFields.add(new SortField("string", SortField.STRING, true));
+    sortFields.add(new SortField("string", SortField.STRING, false));
+    sortFields.add(new SortField("int", SortField.INT, true));
+    sortFields.add(new SortField("int", SortField.INT, false));
+    sortFields.add(new SortField("float", SortField.FLOAT, true));
+    sortFields.add(new SortField("float", SortField.FLOAT, false));
+    sortFields.add(new SortField(null, SortField.SCORE, true));
+    sortFields.add(new SortField(null, SortField.SCORE, false));
+    sortFields.add(new SortField(null, SortField.DOC, true));
+    sortFields.add(new SortField(null, SortField.DOC, false));
+
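+    // record each sub-reader's starting doc ID so per-shard hits can be rebased to index-wide IDs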
+    final int[] docStarts = new int[subSearchers.length];
+    int docBase = 0;
+    for(int subIDX=0;subIDX<docStarts.length;subIDX++) {
+      docStarts[subIDX] = docBase;
+      docBase += subReaders[subIDX].maxDoc();
+      if (VERBOSE) {
+        System.out.println("docStarts[" + subIDX + "]=" + docStarts[subIDX]);
+      }
+    }
+
+    for(int iter=0;iter<1000*RANDOM_MULTIPLIER;iter++) {
+
+      // TODO: custom FieldComp...
+      final Query query = new TermQuery(new Term("text", tokens[random.nextInt(tokens.length)]));
+
+      final Sort sort;
+      if (random.nextInt(10) == 4) {
+        // Sort by score
+        sort = null;
+      } else {
+        final SortField[] randomSortFields = new SortField[_TestUtil.nextInt(random, 1, 3)];
+        for(int sortIDX=0;sortIDX<randomSortFields.length;sortIDX++) {
+          randomSortFields[sortIDX] = sortFields.get(random.nextInt(sortFields.size()));
+        }
+        sort = new Sort(randomSortFields);
+      }
+
+      final int numHits = _TestUtil.nextInt(random, 1, numDocs+5);
+      //final int numHits = 5;
+      
+      if (VERBOSE) {
+        System.out.println("TEST: search query=" + query + " sort=" + sort + " numHits=" + numHits);
+      }
+
+      // First search on whole index:
+      final TopDocs topHits;
+      if (sort == null) {
+        topHits = searcher.search(query, numHits);
+      } else {
+        final TopFieldCollector c = TopFieldCollector.create(sort, numHits, true, true, true, random.nextBoolean());
+        searcher.search(query, c);
+        topHits = c.topDocs(0, numHits);
+      }
+
+      if (VERBOSE) {
+        System.out.println("  top search: " + topHits.totalHits + " totalHits; hits=" + (topHits.scoreDocs == null ? "null" : topHits.scoreDocs.length));
+        if (topHits.scoreDocs != null) {
+          for(int hitIDX=0;hitIDX<topHits.scoreDocs.length;hitIDX++) {
+            final ScoreDoc sd = topHits.scoreDocs[hitIDX];
+            System.out.println("    doc=" + sd.doc + " score=" + sd.score);
+          }
+        }
+      }
+
+      // ... then all shards:
+      final Weight w = searcher.createNormalizedWeight(query);
+
+      final TopDocs[] shardHits = new TopDocs[subSearchers.length];
+      for(int shardIDX=0;shardIDX<subSearchers.length;shardIDX++) {
+        final TopDocs subHits;
+        final ShardSearcher subSearcher = subSearchers[shardIDX];
+        if (sort == null) {
+          subHits = subSearcher.search(w, numHits);
+        } else {
+          final TopFieldCollector c = TopFieldCollector.create(sort, numHits, true, true, true, random.nextBoolean());
+          subSearcher.search(w, c);
+          subHits = c.topDocs(0, numHits);
+        }
+        rebaseDocIDs(docStarts[shardIDX], subHits);
+
+        shardHits[shardIDX] = subHits;
+        if (VERBOSE) {
+          System.out.println("  shard=" + shardIDX + " " + subHits.totalHits + " totalHits hits=" + (subHits.scoreDocs == null ? "null" : subHits.scoreDocs.length));
+          if (subHits.scoreDocs != null) {
+            for(ScoreDoc sd : subHits.scoreDocs) {
+              System.out.println("    doc=" + sd.doc + " score=" + sd.score);
+            }
+          }
+        }
+      }
+
+      // Merge:
+      final TopDocs mergedHits = TopDocs.merge(sort, numHits, shardHits);
+
+      if (VERBOSE) {
+        System.out.println("  mergedHits: " + mergedHits.totalHits + " totalHits; hits=" + (mergedHits.scoreDocs == null ? "null" : mergedHits.scoreDocs.length));
+        if (mergedHits.scoreDocs != null) {
+          for(int hitIDX=0;hitIDX<mergedHits.scoreDocs.length;hitIDX++) {
+            final ScoreDoc sd = mergedHits.scoreDocs[hitIDX];
+            System.out.println("    doc=" + sd.doc + " score=" + sd.score);
+          }
+        }
+      }
+      if (mergedHits.scoreDocs != null) {
+        // Make sure the returned shards are correct:
+        for(int hitIDX=0;hitIDX<mergedHits.scoreDocs.length;hitIDX++) {
+          final ScoreDoc sd = mergedHits.scoreDocs[hitIDX];
+          assertEquals("doc=" + sd.doc + " wrong shard",
+                       ReaderUtil.subIndex(sd.doc, docStarts),
+                       sd.shardIndex);
+        }
+      }
+
+      _TestUtil.assertEquals(topHits, mergedHits);
+    }
+    searcher.close();
+    reader.close();
+    dir.close();
+  }
+
+  private void rebaseDocIDs(int docBase, TopDocs hits) {
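+    // shift per-shard doc IDs (and any SortField.DOC sort values) by this shard's docBase so they become index-wide IDs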
+    List<Integer> docFieldLocs = new ArrayList<Integer>();
+    if (hits instanceof TopFieldDocs) {
+      TopFieldDocs fieldHits = (TopFieldDocs) hits;
+      for(int fieldIDX=0;fieldIDX<fieldHits.fields.length;fieldIDX++) {
+        if (fieldHits.fields[fieldIDX].getType() == SortField.DOC) {
+          docFieldLocs.add(fieldIDX);
+        }
+      }
+    }
+
+    for(int hitIDX=0;hitIDX<hits.scoreDocs.length;hitIDX++) {
+      final ScoreDoc sd = hits.scoreDocs[hitIDX];
+      sd.doc += docBase;
+      if (sd instanceof FieldDoc) {
+        final FieldDoc fd = (FieldDoc) sd;
+        if (fd.fields != null) {
+          for(int idx : docFieldLocs) {
+            fd.fields[idx] = Integer.valueOf(((Integer) fd.fields[idx]).intValue() + docBase);
+          }
+        }
+      }
+    }
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/TestTopScoreDocCollector.java b/lucene/backwards/src/test/org/apache/lucene/search/TestTopScoreDocCollector.java
new file mode 100644
index 0000000..102dce5
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/TestTopScoreDocCollector.java
@@ -0,0 +1,69 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.search.BooleanClause.Occur;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+
+public class TestTopScoreDocCollector extends LuceneTestCase {
+
+  public void testOutOfOrderCollection() throws Exception {
+    Directory dir = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random, dir);
+    for (int i = 0; i < 10; i++) {
+      writer.addDocument(new Document());
+    }
+    
+    boolean[] inOrder = new boolean[] { false, true };
+    String[] actualTSDCClass = new String[] {
+        "OutOfOrderTopScoreDocCollector", 
+        "InOrderTopScoreDocCollector" 
+    };
+    
+    BooleanQuery bq = new BooleanQuery();
+    // Add a Query with SHOULD, since BooleanWeight.scorer() returns BooleanScorer2,
+    // which delegates to BooleanScorer if there are no mandatory clauses.
+    bq.add(new MatchAllDocsQuery(), Occur.SHOULD);
+    // Set minNrShouldMatch to 1 so that BQ will not optimize rewrite to return
+    // the clause instead of BQ.
+    bq.setMinimumNumberShouldMatch(1);
+    IndexReader reader = writer.getReader();
+    IndexSearcher searcher = newSearcher(reader);
+    for (int i = 0; i < inOrder.length; i++) {
+      TopDocsCollector<ScoreDoc> tdc = TopScoreDocCollector.create(3, inOrder[i]);
+      assertEquals("org.apache.lucene.search.TopScoreDocCollector$" + actualTSDCClass[i], tdc.getClass().getName());
+      
+      searcher.search(new MatchAllDocsQuery(), tdc);
+      
+      ScoreDoc[] sd = tdc.topDocs().scoreDocs;
+      assertEquals(3, sd.length);
+      for (int j = 0; j < sd.length; j++) {
+        assertEquals("expected doc Id " + j + " found " + sd[j].doc, j, sd[j].doc);
+      }
+    }
+    writer.close();
+    searcher.close();
+    reader.close();
+    dir.close();
+  }
+  
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/TestWildcard.java b/lucene/backwards/src/test/org/apache/lucene/search/TestWildcard.java
new file mode 100644
index 0000000..e211257
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/TestWildcard.java
@@ -0,0 +1,342 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.document.Field.Index;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.queryParser.QueryParser;
+
+import java.io.IOException;
+
+/**
+ * TestWildcard tests the '*' and '?' wildcard characters.
+ */
+public class TestWildcard
+    extends LuceneTestCase {
+  
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+  }
+
+  public void testEquals() {
+    WildcardQuery wq1 = new WildcardQuery(new Term("field", "b*a"));
+    WildcardQuery wq2 = new WildcardQuery(new Term("field", "b*a"));
+    WildcardQuery wq3 = new WildcardQuery(new Term("field", "b*a"));
+
+    // reflexive?
+    assertEquals(wq1, wq2);
+    assertEquals(wq2, wq1);
+
+    // transitive?
+    assertEquals(wq2, wq3);
+    assertEquals(wq1, wq3);
+
+    assertFalse(wq1.equals(null));
+
+    FuzzyQuery fq = new FuzzyQuery(new Term("field", "b*a"));
+    assertFalse(wq1.equals(fq));
+    assertFalse(fq.equals(wq1));
+  }
+  
+  /**
+   * Tests if a WildcardQuery that has no wildcard in the term is rewritten to a single
+   * TermQuery. The boost should be preserved, and the rewrite should return
+   * a ConstantScoreQuery if the WildcardQuery had a ConstantScore rewriteMethod.
+   */
+  public void testTermWithoutWildcard() throws IOException {
+      Directory indexStore = getIndexStore("field", new String[]{"nowildcard", "nowildcardx"});
+      IndexSearcher searcher = new IndexSearcher(indexStore, true);
+
+      MultiTermQuery wq = new WildcardQuery(new Term("field", "nowildcard"));
+      assertMatches(searcher, wq, 1);
+
+      wq.setRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE);
+      wq.setBoost(0.1F);
+      Query q = searcher.rewrite(wq);
+      assertTrue(q instanceof TermQuery);
+      assertEquals(q.getBoost(), wq.getBoost());
+      
+      wq.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
+      wq.setBoost(0.2F);
+      q = searcher.rewrite(wq);
+      assertTrue(q instanceof ConstantScoreQuery);
+      assertEquals(q.getBoost(), wq.getBoost());
+      
+      wq.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT);
+      wq.setBoost(0.3F);
+      q = searcher.rewrite(wq);
+      assertTrue(q instanceof ConstantScoreQuery);
+      assertEquals(q.getBoost(), wq.getBoost());
+      
+      wq.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE);
+      wq.setBoost(0.4F);
+      q = searcher.rewrite(wq);
+      assertTrue(q instanceof ConstantScoreQuery);
+      assertEquals(q.getBoost(), wq.getBoost());
+      searcher.close();
+      indexStore.close();
+  }
+  
+  /**
+   * Tests if a WildcardQuery with an empty term is rewritten to an empty BooleanQuery
+   */
+  public void testEmptyTerm() throws IOException {
+    Directory indexStore = getIndexStore("field", new String[]{"nowildcard", "nowildcardx"});
+    IndexSearcher searcher = new IndexSearcher(indexStore, true);
+
+    MultiTermQuery wq = new WildcardQuery(new Term("field", ""));
+    wq.setRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE);
+    assertMatches(searcher, wq, 0);
+    Query q = searcher.rewrite(wq);
+    assertTrue(q instanceof BooleanQuery);
+    assertEquals(0, ((BooleanQuery) q).clauses().size());
+    searcher.close();
+    indexStore.close();
+  }
+  
+  /**
+   * Tests if a WildcardQuery that has only a trailing * in the term is
+   * rewritten to a single PrefixQuery. The boost and rewriteMethod should be
+   * preserved.
+   */
+  public void testPrefixTerm() throws IOException {
+    Directory indexStore = getIndexStore("field", new String[]{"prefix", "prefixx"});
+    IndexSearcher searcher = new IndexSearcher(indexStore, true);
+
+    MultiTermQuery wq = new WildcardQuery(new Term("field", "prefix*"));
+    assertMatches(searcher, wq, 2);
+    assertTrue(wq.getEnum(searcher.getIndexReader()) instanceof PrefixTermEnum);
+   
+    searcher.close();
+    indexStore.close();
+  }
+
+  /**
+   * Tests Wildcard queries with an asterisk.
+   */
+  public void testAsterisk()
+      throws IOException {
+    Directory indexStore = getIndexStore("body", new String[]
+    {"metal", "metals"});
+    IndexSearcher searcher = new IndexSearcher(indexStore, true);
+    Query query1 = new TermQuery(new Term("body", "metal"));
+    Query query2 = new WildcardQuery(new Term("body", "metal*"));
+    Query query3 = new WildcardQuery(new Term("body", "m*tal"));
+    Query query4 = new WildcardQuery(new Term("body", "m*tal*"));
+    Query query5 = new WildcardQuery(new Term("body", "m*tals"));
+
+    BooleanQuery query6 = new BooleanQuery();
+    query6.add(query5, BooleanClause.Occur.SHOULD);
+
+    BooleanQuery query7 = new BooleanQuery();
+    query7.add(query3, BooleanClause.Occur.SHOULD);
+    query7.add(query5, BooleanClause.Occur.SHOULD);
+
+    // Queries do not automatically lower-case search terms:
+    Query query8 = new WildcardQuery(new Term("body", "M*tal*"));
+
+    assertMatches(searcher, query1, 1);
+    assertMatches(searcher, query2, 2);
+    assertMatches(searcher, query3, 1);
+    assertMatches(searcher, query4, 2);
+    assertMatches(searcher, query5, 1);
+    assertMatches(searcher, query6, 1);
+    assertMatches(searcher, query7, 2);
+    assertMatches(searcher, query8, 0);
+    assertMatches(searcher, new WildcardQuery(new Term("body", "*tall")), 0);
+    assertMatches(searcher, new WildcardQuery(new Term("body", "*tal")), 1);
+    assertMatches(searcher, new WildcardQuery(new Term("body", "*tal*")), 2);
+    searcher.close();
+    indexStore.close();
+  }
+
+  /**
+   * LUCENE-2620
+   */
+  public void testLotsOfAsterisks()
+      throws IOException {
+    Directory indexStore = getIndexStore("body", new String[]
+    {"metal", "metals"});
+    IndexSearcher searcher = new IndexSearcher(indexStore, true);
+    StringBuilder term = new StringBuilder();
+    term.append("m");
+    for (int i = 0; i < 512; i++)
+      term.append("*");
+    term.append("tal");
+    Query query3 = new WildcardQuery(new Term("body", term.toString()));
+
+    assertMatches(searcher, query3, 1);
+    searcher.close();
+    indexStore.close();
+  }
+  
+  /**
+   * Tests Wildcard queries with a question mark.
+   *
+   * @throws IOException if an error occurs
+   */
+  public void testQuestionmark()
+      throws IOException {
+    Directory indexStore = getIndexStore("body", new String[]
+    {"metal", "metals", "mXtals", "mXtXls"});
+    IndexSearcher searcher = new IndexSearcher(indexStore, true);
+    Query query1 = new WildcardQuery(new Term("body", "m?tal"));
+    Query query2 = new WildcardQuery(new Term("body", "metal?"));
+    Query query3 = new WildcardQuery(new Term("body", "metals?"));
+    Query query4 = new WildcardQuery(new Term("body", "m?t?ls"));
+    Query query5 = new WildcardQuery(new Term("body", "M?t?ls"));
+    Query query6 = new WildcardQuery(new Term("body", "meta??"));
+    
+    assertMatches(searcher, query1, 1); 
+    assertMatches(searcher, query2, 1);
+    assertMatches(searcher, query3, 0);
+    assertMatches(searcher, query4, 3);
+    assertMatches(searcher, query5, 0);
+    assertMatches(searcher, query6, 1); // Query: 'meta??' matches 'metals' not 'metal'
+    searcher.close();
+    indexStore.close();
+  }
+
+  private Directory getIndexStore(String field, String[] contents)
+      throws IOException {
+    Directory indexStore = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random, indexStore);
+    for (int i = 0; i < contents.length; ++i) {
+      Document doc = new Document();
+      doc.add(newField(field, contents[i], Field.Store.YES, Field.Index.ANALYZED));
+      writer.addDocument(doc);
+    }
+    writer.close();
+
+    return indexStore;
+  }
+
+  private void assertMatches(IndexSearcher searcher, Query q, int expectedMatches)
+      throws IOException {
+    ScoreDoc[] result = searcher.search(q, null, 1000).scoreDocs;
+    assertEquals(expectedMatches, result.length);
+  }
+
+  /**
+   * Test that wild card queries are parsed to the correct type and are searched correctly.
+   * This test looks at both parsing and execution of wildcard queries.
+   * Although placed here, it also tests prefix queries, verifying that
+   * prefix queries are not parsed into wildcard queries, and vice versa.
+   * @throws Exception
+   */
+  public void testParsingAndSearching() throws Exception {
+    String field = "content";
+    QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, field, new MockAnalyzer(random));
+    qp.setAllowLeadingWildcard(true);
+    String docs[] = {
+        "\\ abcdefg1",
+        "\\79 hijklmn1",
+        "\\\\ opqrstu1",
+    };
+    // queries that should find all docs
+    String matchAll[] = {
+        "*", "*1", "**1", "*?", "*?1", "?*1", "**", "***", "\\\\*"
+    };
+    // queries that should find no docs
+    String matchNone[] = {
+        "a*h", "a?h", "*a*h", "?a", "a?",
+    };
+    // queries that should be parsed to prefix queries
+    String matchOneDocPrefix[][] = {
+        {"a*", "ab*", "abc*", }, // these should find only doc 0 
+        {"h*", "hi*", "hij*", "\\\\7*"}, // these should find only doc 1
+        {"o*", "op*", "opq*", "\\\\\\\\*"}, // these should find only doc 2
+    };
+    // queries that should be parsed to wildcard queries
+    String matchOneDocWild[][] = {
+        {"*a*", "*ab*", "*abc**", "ab*e*", "*g?", "*f?1", "abc**"}, // these should find only doc 0
+        {"*h*", "*hi*", "*hij**", "hi*k*", "*n?", "*m?1", "hij**"}, // these should find only doc 1
+        {"*o*", "*op*", "*opq**", "op*q*", "*u?", "*t?1", "opq**"}, // these should find only doc 2
+    };
+
+    // prepare the index
+    Directory dir = newDirectory();
+    RandomIndexWriter iw = new RandomIndexWriter(random, dir, 
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
+        .setMergePolicy(newLogMergePolicy()));
+    for (int i = 0; i < docs.length; i++) {
+      Document doc = new Document();
+      doc.add(newField(field,docs[i],Store.NO,Index.ANALYZED));
+      iw.addDocument(doc);
+    }
+    iw.close();
+    
+    IndexSearcher searcher = new IndexSearcher(dir, true);
+    
+    // test queries that must find all
+    for (int i = 0; i < matchAll.length; i++) {
+      String qtxt = matchAll[i];
+      Query q = qp.parse(qtxt);
+      if (VERBOSE) System.out.println("matchAll: qtxt="+qtxt+" q="+q+" "+q.getClass().getName());
+      ScoreDoc[] hits = searcher.search(q, null, 1000).scoreDocs;
+      assertEquals(docs.length,hits.length);
+    }
+    
+    // test queries that must find none
+    for (int i = 0; i < matchNone.length; i++) {
+      String qtxt = matchNone[i];
+      Query q = qp.parse(qtxt);
+      if (VERBOSE) System.out.println("matchNone: qtxt="+qtxt+" q="+q+" "+q.getClass().getName());
+      ScoreDoc[] hits = searcher.search(q, null, 1000).scoreDocs;
+      assertEquals(0,hits.length);
+    }
+
+    // test queries that must be prefix queries and must find only one doc
+    for (int i = 0; i < matchOneDocPrefix.length; i++) {
+      for (int j = 0; j < matchOneDocPrefix[i].length; j++) {
+        String qtxt = matchOneDocPrefix[i][j];
+        Query q = qp.parse(qtxt);
+        if (VERBOSE) System.out.println("match 1 prefix: doc="+docs[i]+" qtxt="+qtxt+" q="+q+" "+q.getClass().getName());
+        assertEquals(PrefixQuery.class, q.getClass());
+        ScoreDoc[] hits = searcher.search(q, null, 1000).scoreDocs;
+        assertEquals(1,hits.length);
+        assertEquals(i,hits[0].doc);
+      }
+    }
+
+    // test queries that must be wildcard queries and must find only one doc
+    for (int i = 0; i < matchOneDocWild.length; i++) {
+      for (int j = 0; j < matchOneDocWild[i].length; j++) {
+        String qtxt = matchOneDocWild[i][j];
+        Query q = qp.parse(qtxt);
+        if (VERBOSE) System.out.println("match 1 wild: doc="+docs[i]+" qtxt="+qtxt+" q="+q+" "+q.getClass().getName());
+        assertEquals(WildcardQuery.class, q.getClass());
+        ScoreDoc[] hits = searcher.search(q, null, 1000).scoreDocs;
+        assertEquals(1,hits.length);
+        assertEquals(i,hits[0].doc);
+      }
+    }
+
+    searcher.close();
+    dir.close();
+  }
+  
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/TestWildcardRandom.java b/lucene/backwards/src/test/org/apache/lucene/search/TestWildcardRandom.java
new file mode 100644
index 0000000..ae91efd
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/TestWildcardRandom.java
@@ -0,0 +1,139 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.text.DecimalFormat;
+import java.text.DecimalFormatSymbols;
+import java.text.NumberFormat;
+import java.util.Locale;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util._TestUtil;
+
+/**
+ * Create an index with terms from 000-999.
+ * Generates random wildcards according to patterns,
+ * and validates the correct number of hits are returned.
+ */
+public class TestWildcardRandom extends LuceneTestCase {
+  private IndexSearcher searcher;
+  private IndexReader reader;
+  private Directory dir;
+  
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    dir = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random, dir,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
+        .setMaxBufferedDocs(_TestUtil.nextInt(random, 50, 1000)));
+    
+    Document doc = new Document();
+    Field bogus1 = newField("bogus1", "", Field.Store.NO, Field.Index.NOT_ANALYZED_NO_NORMS);
+    Field field = newField("field", "", Field.Store.NO, Field.Index.ANALYZED_NO_NORMS);
+    Field bogus2 = newField("zbogus2", "", Field.Store.NO, Field.Index.NOT_ANALYZED_NO_NORMS);
+    doc.add(field);
+    doc.add(bogus1);
+    doc.add(bogus2);
+    
+    NumberFormat df = new DecimalFormat("000", new DecimalFormatSymbols(Locale.ENGLISH));
+    for (int i = 0; i < 1000; i++) {
+      field.setValue(df.format(i));
+      bogus1.setValue(_TestUtil.randomUnicodeString(random, 10));
+      bogus2.setValue(_TestUtil.randomUnicodeString(random, 10));
+      writer.addDocument(doc);
+    }
+    
+    reader = writer.getReader();
+    searcher = newSearcher(reader);
+    writer.close();
+  }
+  
+  private char N() {
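+    // 0x30 is ASCII '0', so this returns a random digit character '0'-'9'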
+    return (char) (0x30 + random.nextInt(10));
+  }
+  
+  private String fillPattern(String wildcardPattern) {
+    StringBuilder sb = new StringBuilder();
+    for (int i = 0; i < wildcardPattern.length(); i++) {
+      switch(wildcardPattern.charAt(i)) {
+        case 'N':
+          sb.append(N());
+          break;
+        default:
+          sb.append(wildcardPattern.charAt(i));
+      }
+    }
+    return sb.toString();
+  }
+  
+  private void assertPatternHits(String pattern, int numHits) throws Exception {
+    // TODO: run with different rewrites
+    Query wq = new WildcardQuery(new Term("field", fillPattern(pattern)));
+    TopDocs docs = searcher.search(wq, 25);
+    assertEquals("Incorrect hits for pattern: " + pattern, numHits, docs.totalHits);
+  }
+
+  @Override
+  public void tearDown() throws Exception {
+    searcher.close();
+    reader.close();
+    dir.close();
+    super.tearDown();
+  }
+  
+  public void testWildcards() throws Exception {
+    int num = atLeast(1);
+    for (int i = 0; i < num; i++) {
+      assertPatternHits("NNN", 1);
+      assertPatternHits("?NN", 10);
+      assertPatternHits("N?N", 10);
+      assertPatternHits("NN?", 10);
+    }
+    
+    for (int i = 0; i < num; i++) {
+      assertPatternHits("??N", 100);
+      assertPatternHits("N??", 100);
+      assertPatternHits("???", 1000);
+      
+      assertPatternHits("NN*", 10);
+      assertPatternHits("N*", 100);
+      assertPatternHits("*", 1000);
+      
+      assertPatternHits("*NN", 10);
+      assertPatternHits("*N", 100);
+      
+      assertPatternHits("N*N", 10);
+      
+      // combo of ? and * operators
+      assertPatternHits("?N*", 100);
+      assertPatternHits("N?*", 100);
+      
+      assertPatternHits("*N?", 100);
+      assertPatternHits("*??", 1000);
+      assertPatternHits("*?N", 100);
+    }
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/function/FunctionTestSetup.java b/lucene/backwards/src/test/org/apache/lucene/search/function/FunctionTestSetup.java
new file mode 100755
index 0000000..2b5d017
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/function/FunctionTestSetup.java
@@ -0,0 +1,167 @@
+package org.apache.lucene.search.function;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.Fieldable;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util._TestUtil;
+import org.junit.AfterClass;
+import org.junit.Ignore;
+
+/**
+ * Setup for function tests
+ */
+@Ignore
+public abstract class FunctionTestSetup extends LuceneTestCase {
+
+  /**
+   * Actual score computation order is slightly different than assumptions;
+   * this allows for a small amount of variation.
+   */
+  protected static float TEST_SCORE_TOLERANCE_DELTA = 0.001f;
+
+  protected static final int N_DOCS = 17; // select a prime number > 2
+
+  protected static final String ID_FIELD = "id";
+  protected static final String TEXT_FIELD = "text";
+  protected static final String INT_FIELD = "iii";
+  protected static final String FLOAT_FIELD = "fff";
+
+  private static final String DOC_TEXT_LINES[] = {
+          "Well, this is just some plain text we use for creating the ",
+          "test documents. It used to be a text from an online collection ",
+          "devoted to first aid, but if there was there an (online) lawyers ",
+          "first aid collection with legal advices, \"it\" might have quite ",
+          "probably advised one not to include \"it\"'s text or the text of ",
+          "any other online collection in one's code, unless one has money ",
+          "that one don't need and one is happy to donate for lawyers ",
+          "charity. Anyhow at some point, rechecking the usage of this text, ",
+          "it became uncertain that this text is free to use, because ",
+          "the web site in the disclaimer of he eBook containing that text ",
+          "was not responding anymore, and at the same time, in projGut, ",
+          "searching for first aid no longer found that eBook as well. ",
+          "So here we are, with a perhaps much less interesting ",
+          "text for the test, but oh much much safer. ",
+  };
+  
+  protected static Directory dir;
+  protected static Analyzer anlzr;
+  
+  @AfterClass
+  public static void afterClassFunctionTestSetup() throws Exception {
+    dir.close();
+    dir = null;
+    anlzr = null;
+  }
+
+  protected static void createIndex(boolean doMultiSegment) throws Exception {
+    if (VERBOSE) {
+      System.out.println("TEST: setUp");
+    }
+    // prepare a small index with just a few documents.  
+    dir = newDirectory();
+    anlzr = new MockAnalyzer(random);
+    IndexWriterConfig iwc = newIndexWriterConfig( TEST_VERSION_CURRENT, anlzr).setMergePolicy(newLogMergePolicy());
+    RandomIndexWriter iw = new RandomIndexWriter(random, dir, iwc);
+    if (doMultiSegment) {
+      iw.w.setMaxBufferedDocs(_TestUtil.nextInt(random, 2, 7));
+    }
+
+    iw.w.setInfoStream(VERBOSE ? System.out : null);
+    // add docs not in natural ID order, to verify that results really are ordered by score and not by insertion order
+    int remaining = N_DOCS;
+    boolean done[] = new boolean[N_DOCS];
+    int i = 0;
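+    // stepping by 4 (mod N_DOCS) visits every slot exactly once because N_DOCS is prime, so gcd(4, N_DOCS) == 1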
+    while (remaining > 0) {
+      if (done[i]) {
+        throw new Exception("to set this test correctly N_DOCS=" + N_DOCS + " must be prime and greater than 2!");
+      }
+      addDoc(iw, i);
+      done[i] = true;
+      i = (i + 4) % N_DOCS;
+      remaining --;
+    }
+    if (!doMultiSegment) {
+      if (VERBOSE) {
+        System.out.println("TEST: setUp optimize");
+      }
+      iw.optimize();
+    }
+    iw.close();
+    if (VERBOSE) {
+      System.out.println("TEST: setUp done close");
+    }
+  }
+
+  private static void addDoc(RandomIndexWriter iw, int i) throws Exception {
+    Document d = new Document();
+    Fieldable f;
+    int scoreAndID = i + 1;
+
+    f = newField(ID_FIELD, id2String(scoreAndID), Field.Store.YES, Field.Index.NOT_ANALYZED); // for debug purposes
+    f.setOmitNorms(true);
+    d.add(f);
+
+    f = newField(TEXT_FIELD, "text of doc" + scoreAndID + textLine(i), Field.Store.NO, Field.Index.ANALYZED); // for regular search
+    f.setOmitNorms(true);
+    d.add(f);
+
+    f = newField(INT_FIELD, "" + scoreAndID, Field.Store.NO, Field.Index.NOT_ANALYZED); // for function scoring
+    f.setOmitNorms(true);
+    d.add(f);
+
+    f = newField(FLOAT_FIELD, scoreAndID + ".000", Field.Store.NO, Field.Index.NOT_ANALYZED); // for function scoring
+    f.setOmitNorms(true);
+    d.add(f);
+
+    iw.addDocument(d);
+    log("added: " + d);
+  }
+
+  // 17 --> ID00017
+  protected static String id2String(int scoreAndID) {
+    String s = "000000000" + scoreAndID;
+    int n = ("" + N_DOCS).length() + 3;
+    int k = s.length() - n;
+    return "ID" + s.substring(k);
+  }
+
+  // some text line for regular search
+  private static String textLine(int docNum) {
+    return DOC_TEXT_LINES[docNum % DOC_TEXT_LINES.length];
+  }
+
+  // extract expected doc score from its ID Field: "ID7" --> 7.0
+  protected static float expectedFieldScore(String docIDFieldVal) {
+    return Float.parseFloat(docIDFieldVal.substring(2));
+  }
+
+  // debug messages (printed only when VERBOSE is true)
+  protected static void log(Object o) {
+    if (VERBOSE) {
+      System.out.println(o.toString());
+    }
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/function/JustCompileSearchSpans.java b/lucene/backwards/src/test/org/apache/lucene/search/function/JustCompileSearchSpans.java
new file mode 100644
index 0000000..a85f040
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/function/JustCompileSearchSpans.java
@@ -0,0 +1,96 @@
+package org.apache.lucene.search.function;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.search.FieldCache;
+
+import java.io.IOException;
+
+/**
+ * Holds all implementations of classes in the o.a.l.s.function package as a
+ * back-compatibility test. It does not run any tests per-se, however if
+ * someone adds a method to an interface or abstract method to an abstract
+ * class, one of the implementations here will fail to compile and so we know
+ * back-compat policy was violated.
+ */
+final class JustCompileSearchFunction {
+
+  private static final String UNSUPPORTED_MSG = "unsupported: used for back-compat testing only !";
+
+  static final class JustCompileDocValues extends DocValues {
+    @Override
+    public float floatVal(int doc) {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+
+    @Override
+    public String toString(int doc) {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+
+  }
+
+  static final class JustCompileFieldCacheSource extends FieldCacheSource {
+
+    public JustCompileFieldCacheSource(String field) {
+      super(field);
+    }
+
+    @Override
+    public boolean cachedFieldSourceEquals(FieldCacheSource other) {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+
+    @Override
+    public int cachedFieldSourceHashCode() {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+
+    @Override
+    public DocValues getCachedFieldValues(FieldCache cache, String field,
+                                          IndexReader reader) throws IOException {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+
+  }
+
+  static final class JustCompileValueSource extends ValueSource {
+    @Override
+    public String description() {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+
+    @Override
+    public boolean equals(Object o) {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+
+    @Override
+    public DocValues getValues(IndexReader reader) throws IOException {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+
+    @Override
+    public int hashCode() {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+
+  }
+
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/function/TestCustomScoreQuery.java b/lucene/backwards/src/test/org/apache/lucene/search/function/TestCustomScoreQuery.java
new file mode 100755
index 0000000..bf3a554
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/function/TestCustomScoreQuery.java
@@ -0,0 +1,349 @@
+package org.apache.lucene.search.function;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.queryParser.QueryParser;
+import org.apache.lucene.queryParser.ParseException;
+import org.apache.lucene.search.*;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.Term;
+
+/**
+ * Test CustomScoreQuery search.
+ */
+public class TestCustomScoreQuery extends FunctionTestSetup {
+
+  @BeforeClass
+  public static void beforeClass() throws Exception {
+    createIndex(true);
+  }
+
+  /**
+   * Test that CustomScoreQuery of Type.BYTE returns the expected scores.
+   */
+  @Test
+  public void testCustomScoreByte() throws Exception, ParseException {
+    // INT field values are small enough to be parsed as byte
+    doTestCustomScore(INT_FIELD, FieldScoreQuery.Type.BYTE, 1.0);
+    doTestCustomScore(INT_FIELD, FieldScoreQuery.Type.BYTE, 2.0);
+  }
+
+  /**
+   * Test that CustomScoreQuery of Type.SHORT returns the expected scores.
+   */
+  @Test
+  public void testCustomScoreShort() throws Exception, ParseException {
+    // INT field values are small enough to be parsed as short
+    doTestCustomScore(INT_FIELD, FieldScoreQuery.Type.SHORT, 1.0);
+    doTestCustomScore(INT_FIELD, FieldScoreQuery.Type.SHORT, 3.0);
+  }
+
+  /**
+   * Test that CustomScoreQuery of Type.INT returns the expected scores.
+   */
+  @Test
+  public void testCustomScoreInt() throws Exception, ParseException {
+    doTestCustomScore(INT_FIELD, FieldScoreQuery.Type.INT, 1.0);
+    doTestCustomScore(INT_FIELD, FieldScoreQuery.Type.INT, 4.0);
+  }
+
+  /**
+   * Test that CustomScoreQuery of Type.FLOAT returns the expected scores.
+   */
+  @Test
+  public void testCustomScoreFloat() throws Exception, ParseException {
+    // INT field can be parsed as float
+    doTestCustomScore(INT_FIELD, FieldScoreQuery.Type.FLOAT, 1.0);
+    doTestCustomScore(INT_FIELD, FieldScoreQuery.Type.FLOAT, 5.0);
+    // same values, but in float format
+    doTestCustomScore(FLOAT_FIELD, FieldScoreQuery.Type.FLOAT, 1.0);
+    doTestCustomScore(FLOAT_FIELD, FieldScoreQuery.Type.FLOAT, 6.0);
+  }
+
+  // must be a static class, otherwise serialization tests fail
+  private static class CustomAddQuery extends CustomScoreQuery {
+    // constructor
+    CustomAddQuery(Query q, ValueSourceQuery qValSrc) {
+      super(q, qValSrc);
+    }
+
+    /*(non-Javadoc) @see org.apache.lucene.search.function.CustomScoreQuery#name() */
+    @Override
+    public String name() {
+      return "customAdd";
+    }
+    
+    @Override
+    protected CustomScoreProvider getCustomScoreProvider(IndexReader reader) {
+      return new CustomScoreProvider(reader) {
+        @Override
+        public float customScore(int doc, float subQueryScore, float valSrcScore) {
+          return subQueryScore + valSrcScore;
+        }
+
+        @Override
+        public Explanation customExplain(int doc, Explanation subQueryExpl, Explanation valSrcExpl) {
+          float valSrcScore = valSrcExpl == null ? 0 : valSrcExpl.getValue();
+          Explanation exp = new Explanation(valSrcScore + subQueryExpl.getValue(), "custom score: sum of:");
+          exp.addDetail(subQueryExpl);
+          if (valSrcExpl != null) {
+            exp.addDetail(valSrcExpl);
+          }
+          return exp;
+        }
+      };
+    }
+  }
+
+  // must be a static class, otherwise serialization tests fail
+  private static class CustomMulAddQuery extends CustomScoreQuery {
+    // constructor
+    CustomMulAddQuery(Query q, ValueSourceQuery qValSrc1, ValueSourceQuery qValSrc2) {
+      super(q, new ValueSourceQuery[]{qValSrc1, qValSrc2});
+    }
+
+    /*(non-Javadoc) @see org.apache.lucene.search.function.CustomScoreQuery#name() */
+    @Override
+    public String name() {
+      return "customMulAdd";
+    }
+
+    @Override
+    protected CustomScoreProvider getCustomScoreProvider(IndexReader reader) {
+      return new CustomScoreProvider(reader) {
+        @Override
+        public float customScore(int doc, float subQueryScore, float valSrcScores[]) {
+          if (valSrcScores.length == 0) {
+            return subQueryScore;
+          }
+          if (valSrcScores.length == 1) {
+            // a single value source: just add its score to the sub-query score
+            return subQueryScore + valSrcScores[0];
+          }
+          return (subQueryScore + valSrcScores[0]) * valSrcScores[1]; // we know there are two
+        }
+
+        @Override
+        public Explanation customExplain(int doc, Explanation subQueryExpl, Explanation valSrcExpls[]) {
+          if (valSrcExpls.length == 0) {
+            return subQueryExpl;
+          }
+          Explanation exp = new Explanation(valSrcExpls[0].getValue() + subQueryExpl.getValue(), "sum of:");
+          exp.addDetail(subQueryExpl);
+          exp.addDetail(valSrcExpls[0]);
+          if (valSrcExpls.length == 1) {
+            exp.setDescription("CustomMulAdd, sum of:");
+            return exp;
+          }
+          Explanation exp2 = new Explanation(valSrcExpls[1].getValue() * exp.getValue(), "custom score: product of:");
+          exp2.addDetail(valSrcExpls[1]);
+          exp2.addDetail(exp);
+          return exp2;
+        }
+      };
+    }
+  }
+
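+  // scores each document purely by its INT_FIELD value (loaded via FieldCache), ignoring the wrapped query's score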
+  private final class CustomExternalQuery extends CustomScoreQuery {
+
+    @Override
+    protected CustomScoreProvider getCustomScoreProvider(IndexReader reader) throws IOException {
+      final int[] values = FieldCache.DEFAULT.getInts(reader, INT_FIELD);
+      return new CustomScoreProvider(reader) {
+        @Override
+        public float customScore(int doc, float subScore, float valSrcScore) throws IOException {
+          assertTrue(doc <= reader.maxDoc());
+          return values[doc];
+        }
+      };
+    }
+
+    public CustomExternalQuery(Query q) {
+      super(q);
+    }
+  }
+
+  @Test
+  public void testCustomExternalQuery() throws Exception {
+    QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, TEXT_FIELD,anlzr); 
+    String qtxt = "first aid text"; // from the doc texts in FunctionTestSetup.
+    Query q1 = qp.parse(qtxt); 
+    
+    final Query q = new CustomExternalQuery(q1);
+    log(q);
+
+    IndexSearcher s = new IndexSearcher(dir, true);
+    TopDocs hits = s.search(q, 1000);
+    assertEquals(N_DOCS, hits.totalHits);
+    for(int i=0;i<N_DOCS;i++) {
+      final int doc = hits.scoreDocs[i].doc;
+      final float score = hits.scoreDocs[i].score;
+      assertEquals("doc=" + doc, (float) 1+(4*doc) % N_DOCS, score, 0.0001);
+    }
+    s.close();
+  }
+  
+  @Test
+  public void testRewrite() throws Exception {
+    final IndexSearcher s = new IndexSearcher(dir, true);
+
+    Query q = new TermQuery(new Term(TEXT_FIELD, "first"));
+    CustomScoreQuery original = new CustomScoreQuery(q);
+    CustomScoreQuery rewritten = (CustomScoreQuery) original.rewrite(s.getIndexReader());
+    assertTrue("rewritten query should be identical, as TermQuery does not rewrite", original == rewritten);
+    assertTrue("no hits for query", s.search(rewritten,1).totalHits > 0);
+    assertEquals(s.search(q,1).totalHits, s.search(rewritten,1).totalHits);
+
+    q = new TermRangeQuery(TEXT_FIELD, null, null, true, true); // everything
+    original = new CustomScoreQuery(q);
+    rewritten = (CustomScoreQuery) original.rewrite(s.getIndexReader());
+    assertTrue("rewritten query should not be identical, as TermRangeQuery rewrites", original != rewritten);
+    assertTrue("no hits for query", s.search(rewritten,1).totalHits > 0);
+    assertEquals(s.search(q,1).totalHits, s.search(original,1).totalHits);
+    assertEquals(s.search(q,1).totalHits, s.search(rewritten,1).totalHits);
+    
+    s.close();
+  }
+  
+  // Test that FieldScoreQuery returns docs with expected score.
+  private void doTestCustomScore(String field, FieldScoreQuery.Type tp, double dboost) throws Exception, ParseException {
+    float boost = (float) dboost;
+    IndexSearcher s = new IndexSearcher(dir, true);
+    FieldScoreQuery qValSrc = new FieldScoreQuery(field, tp); // a query that would score by the field
+    QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, TEXT_FIELD, anlzr);
+    String qtxt = "first aid text"; // from the doc texts in FunctionTestSetup.
+
+    // regular (boolean) query.
+    Query q1 = qp.parse(qtxt);
+    log(q1);
+
+    // custom query, that should score the same as q1.
+    Query q2CustomNeutral = new CustomScoreQuery(q1);
+    q2CustomNeutral.setBoost(boost);
+    log(q2CustomNeutral);
+
+    // custom query, that should (by default) multiply the scores of q1 by that of the field
+    CustomScoreQuery q3CustomMul = new CustomScoreQuery(q1, qValSrc);
+    q3CustomMul.setStrict(true);
+    q3CustomMul.setBoost(boost);
+    log(q3CustomMul);
+
+    // custom query, that should add the scores of q1 to that of the field
+    CustomScoreQuery q4CustomAdd = new CustomAddQuery(q1, qValSrc);
+    q4CustomAdd.setStrict(true);
+    q4CustomAdd.setBoost(boost);
+    log(q4CustomAdd);
+
+    // custom query, that multiplies and adds the field score to that of q1
+    CustomScoreQuery q5CustomMulAdd = new CustomMulAddQuery(q1, qValSrc, qValSrc);
+    q5CustomMulAdd.setStrict(true);
+    q5CustomMulAdd.setBoost(boost);
+    log(q5CustomMulAdd);
+
+    // do all the searches
+    TopDocs td1 = s.search(q1, null, 1000);
+    TopDocs td2CustomNeutral = s.search(q2CustomNeutral, null, 1000);
+    TopDocs td3CustomMul = s.search(q3CustomMul, null, 1000);
+    TopDocs td4CustomAdd = s.search(q4CustomAdd, null, 1000);
+    TopDocs td5CustomMulAdd = s.search(q5CustomMulAdd, null, 1000);
+
+    // put results in map so we can verify the scores although they have changed
+    Map<Integer,Float> h1               = topDocsToMap(td1);
+    Map<Integer,Float> h2CustomNeutral  = topDocsToMap(td2CustomNeutral);
+    Map<Integer,Float> h3CustomMul      = topDocsToMap(td3CustomMul);
+    Map<Integer,Float> h4CustomAdd      = topDocsToMap(td4CustomAdd);
+    Map<Integer,Float> h5CustomMulAdd   = topDocsToMap(td5CustomMulAdd);
+    
+    verifyResults(boost, s, 
+        h1, h2CustomNeutral, h3CustomMul, h4CustomAdd, h5CustomMulAdd,
+        q1, q2CustomNeutral, q3CustomMul, q4CustomAdd, q5CustomMulAdd);
+    s.close();
+  }
+
+  // verify results are as expected.
+  private void verifyResults(float boost, IndexSearcher s, 
+      Map<Integer,Float> h1, Map<Integer,Float> h2customNeutral, Map<Integer,Float> h3CustomMul, Map<Integer,Float> h4CustomAdd, Map<Integer,Float> h5CustomMulAdd,
+      Query q1, Query q2, Query q3, Query q4, Query q5) throws Exception {
+    
+    // verify numbers of matches
+    log("#hits = "+h1.size());
+    assertEquals("queries should have same #hits",h1.size(),h2customNeutral.size());
+    assertEquals("queries should have same #hits",h1.size(),h3CustomMul.size());
+    assertEquals("queries should have same #hits",h1.size(),h4CustomAdd.size());
+    assertEquals("queries should have same #hits",h1.size(),h5CustomMulAdd.size());
+
+    QueryUtils.check(random, q1,s);
+    QueryUtils.check(random, q2,s);
+    QueryUtils.check(random, q3,s);
+    QueryUtils.check(random, q4,s);
+    QueryUtils.check(random, q5,s);
+
+    // verify score ratios
+    for (final Integer doc : h1.keySet()) {
+
+      log("doc = "+doc);
+
+      float fieldScore = expectedFieldScore(s.getIndexReader().document(doc).get(ID_FIELD));
+      log("fieldScore = " + fieldScore);
+      assertTrue("fieldScore should not be 0", fieldScore > 0);
+
+      float score1 = h1.get(doc);
+      logResult("score1=", s, q1, doc, score1);
+      
+      float score2 = h2customNeutral.get(doc);
+      logResult("score2=", s, q2, doc, score2);
+      assertEquals("same score (just boosted) for neutral", boost * score1, score2, TEST_SCORE_TOLERANCE_DELTA);
+
+      float score3 = h3CustomMul.get(doc);
+      logResult("score3=", s, q3, doc, score3);
+      assertEquals("new score for custom mul", boost * fieldScore * score1, score3, TEST_SCORE_TOLERANCE_DELTA);
+      
+      float score4 = h4CustomAdd.get(doc);
+      logResult("score4=", s, q4, doc, score4);
+      assertEquals("new score for custom add", boost * (fieldScore + score1), score4, TEST_SCORE_TOLERANCE_DELTA);
+      
+      float score5 = h5CustomMulAdd.get(doc);
+      logResult("score5=", s, q5, doc, score5);
+      assertEquals("new score for custom mul add", boost * fieldScore * (score1 + fieldScore), score5, TEST_SCORE_TOLERANCE_DELTA);
+    }
+  }
+
+  private void logResult(String msg, Searcher s, Query q, int doc, float score1) throws IOException {
+    log(msg+" "+score1);
+    log("Explain by: "+q);
+    log(s.explain(q,doc));
+  }
+
+  // since custom scoring modifies the order of docs, map results 
+  // by doc ids so that we can later compare/verify them 
+  private Map<Integer,Float> topDocsToMap(TopDocs td) {
+    Map<Integer,Float> h = new HashMap<Integer,Float>();
+    for (int i=0; i<td.totalHits; i++) {
+      h.put(td.scoreDocs[i].doc, td.scoreDocs[i].score);
+    }
+    return h;
+  }
+
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/function/TestDocValues.java b/lucene/backwards/src/test/org/apache/lucene/search/function/TestDocValues.java
new file mode 100644
index 0000000..e20626b
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/function/TestDocValues.java
@@ -0,0 +1,113 @@
+package org.apache.lucene.search.function;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.junit.Test;
+
+/**
+ * DocValues TestCase  
+ */
+public class TestDocValues extends LuceneTestCase {
+
+  @Test
+  public void testGetMinValue() {
+    float[] innerArray = new float[] { 1.0f, 2.0f, -1.0f, 100.0f };
+    DocValuesTestImpl docValues = new DocValuesTestImpl(innerArray);
+    assertEquals("-1.0f is the min value in the source array", -1.0f, docValues
+        .getMinValue(), 0);
+
+    // test without values - NaN
+    innerArray = new float[] {};
+    docValues = new DocValuesTestImpl(innerArray);
+    assertTrue("max is NaN - no values in inner array", Float.isNaN(docValues
+        .getMinValue()));
+  }
+
+  @Test
+  public void testGetMaxValue() {
+    float[] innerArray = new float[] { 1.0f, 2.0f, -1.0f, 10.0f };
+    DocValuesTestImpl docValues = new DocValuesTestImpl(innerArray);
+    assertEquals("10.0f is the max value in the source array", 10.0f, docValues
+        .getMaxValue(), 0);
+
+    innerArray = new float[] { -3.0f, -1.0f, -100.0f };
+    docValues = new DocValuesTestImpl(innerArray);
+    assertEquals("-1.0f is the max value in the source array", -1.0f, docValues
+        .getMaxValue(), 0);
+
+    innerArray = new float[] { -3.0f, -1.0f, 100.0f, Float.MAX_VALUE,
+        Float.MAX_VALUE - 1 };
+    docValues = new DocValuesTestImpl(innerArray);
+    assertEquals(Float.MAX_VALUE + " is the max value in the source array",
+        Float.MAX_VALUE, docValues.getMaxValue(), 0);
+
+    // test without values - NaN
+    innerArray = new float[] {};
+    docValues = new DocValuesTestImpl(innerArray);
+    assertTrue("max is NaN - no values in inner array", Float.isNaN(docValues
+        .getMaxValue()));
+  }
+
+  @Test
+  public void testGetAverageValue() {
+    float[] innerArray = new float[] { 1.0f, 1.0f, 1.0f, 1.0f };
+    DocValuesTestImpl docValues = new DocValuesTestImpl(innerArray);
+    assertEquals("the average is 1.0f", 1.0f, docValues.getAverageValue(), 0);
+
+    innerArray = new float[] { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f };
+    docValues = new DocValuesTestImpl(innerArray);
+    assertEquals("the average is 3.5f", 3.5f, docValues.getAverageValue(), 0);
+
+    // test with negative values
+    innerArray = new float[] { -1.0f, 2.0f };
+    docValues = new DocValuesTestImpl(innerArray);
+    assertEquals("the average is 0.5f", 0.5f, docValues.getAverageValue(), 0);
+
+    // test without values - NaN
+    innerArray = new float[] {};
+    docValues = new DocValuesTestImpl(innerArray);
+    assertTrue("the average is NaN - no values in inner array", Float
+        .isNaN(docValues.getAverageValue()));
+  }
+
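+  // minimal DocValues implementation backed by a plain float array, used to exercise getMinValue/getMaxValue/getAverageValue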
+  static class DocValuesTestImpl extends DocValues {
+    float[] innerArray;
+
+    DocValuesTestImpl(float[] innerArray) {
+      this.innerArray = innerArray;
+    }
+
+    /**
+     * @see org.apache.lucene.search.function.DocValues#floatVal(int)
+     */
+    @Override
+    public float floatVal(int doc) {
+      return innerArray[doc];
+    }
+
+    /**
+     * @see org.apache.lucene.search.function.DocValues#toString(int)
+     */
+    @Override
+    public String toString(int doc) {
+      return Integer.toString(doc);
+    }
+
+  }
+
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/function/TestFieldScoreQuery.java b/lucene/backwards/src/test/org/apache/lucene/search/function/TestFieldScoreQuery.java
new file mode 100755
index 0000000..14a2dcf
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/function/TestFieldScoreQuery.java
@@ -0,0 +1,245 @@
+package org.apache.lucene.search.function;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.HashMap;
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.QueryUtils;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.TopDocs;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/**
+ * Test FieldScoreQuery search.
+ * <p>
+ * Tests here create an index with a few documents, each having
+ * an int value indexed  field and a float value indexed field.
+ * The values of these fields are later used for scoring.
+ * <p>
+ * The rank tests verify (via the returned ScoreDocs) that docs are ordered by score as expected.
+ * <p>
+ * The exact score tests use TopDocs to verify the exact score.
+ */
+public class TestFieldScoreQuery extends FunctionTestSetup {
+
+  @BeforeClass
+  public static void beforeClass() throws Exception {
+    createIndex(true);
+  }
+
+  /** Test that FieldScoreQuery of Type.BYTE returns docs in expected order. */
+  @Test
+  public void testRankByte () throws Exception {
+    // INT field values are small enough to be parsed as byte
+    doTestRank(INT_FIELD,FieldScoreQuery.Type.BYTE);
+  }
+
+  /** Test that FieldScoreQuery of Type.SHORT returns docs in expected order. */
+  @Test
+  public void testRankShort () throws Exception {
+    // INT field values are small enough to be parsed as short
+    doTestRank(INT_FIELD,FieldScoreQuery.Type.SHORT);
+  }
+
+  /** Test that FieldScoreQuery of Type.INT returns docs in expected order. */
+  @Test
+  public void testRankInt () throws Exception {
+    doTestRank(INT_FIELD,FieldScoreQuery.Type.INT);
+  }
+
+  /** Test that FieldScoreQuery of Type.FLOAT returns docs in expected order. */
+  @Test
+  public void testRankFloat () throws Exception {
+    // INT field can be parsed as float
+    doTestRank(INT_FIELD,FieldScoreQuery.Type.FLOAT);
+    // same values, but in float format
+    doTestRank(FLOAT_FIELD,FieldScoreQuery.Type.FLOAT);
+  }
+
+  // Test that FieldScoreQuery returns docs in expected order.
+  private void doTestRank (String field, FieldScoreQuery.Type tp) throws Exception {
+    IndexSearcher s = new IndexSearcher(dir, true);
+    Query q = new FieldScoreQuery(field,tp);
+    log("test: "+q);
+    QueryUtils.check(random, q,s);
+    ScoreDoc[] h = s.search(q, null, 1000).scoreDocs;
+    assertEquals("All docs should be matched!",N_DOCS,h.length);
+    String prevID = "ID"+(N_DOCS+1); // greater than all ids of docs in this test
+    for (int i=0; i<h.length; i++) {
+      String resID = s.doc(h[i].doc).get(ID_FIELD);
+      log(i+".   score="+h[i].score+"  -  "+resID);
+      log(s.explain(q,h[i].doc));
+      assertTrue("res id "+resID+" should be < prev res id "+prevID, resID.compareTo(prevID)<0);
+      prevID = resID;
+    }
+    s.close();
+  }
+
+  /** Test that FieldScoreQuery of Type.BYTE returns the expected scores. */
+  @Test
+  public void testExactScoreByte () throws Exception {
+    // INT field values are small enough to be parsed as byte
+    doTestExactScore(INT_FIELD,FieldScoreQuery.Type.BYTE);
+  }
+
+  /** Test that FieldScoreQuery of Type.SHORT returns the expected scores. */
+  @Test
+  public void testExactScoreShort () throws  Exception {
+    // INT field values are small enough to be parsed as short
+    doTestExactScore(INT_FIELD,FieldScoreQuery.Type.SHORT);
+  }
+
+  /** Test that FieldScoreQuery of Type.INT returns the expected scores. */
+  @Test
+  public void testExactScoreInt () throws  Exception {
+    doTestExactScore(INT_FIELD,FieldScoreQuery.Type.INT);
+  }
+
+  /** Test that FieldScoreQuery of Type.FLOAT returns the expected scores. */
+  @Test
+  public void testExactScoreFloat () throws  Exception {
+    // INT field can be parsed as float
+    doTestExactScore(INT_FIELD,FieldScoreQuery.Type.FLOAT);
+    // same values, but in float format
+    doTestExactScore(FLOAT_FIELD,FieldScoreQuery.Type.FLOAT);
+  }
+
+  // Test that FieldScoreQuery returns docs with expected score.
+  private void doTestExactScore (String field, FieldScoreQuery.Type tp) throws Exception {
+    IndexSearcher s = new IndexSearcher(dir, true);
+    Query q = new FieldScoreQuery(field,tp);
+    TopDocs td = s.search(q,null,1000);
+    assertEquals("All docs should be matched!",N_DOCS,td.totalHits);
+    ScoreDoc sd[] = td.scoreDocs;
+    for (ScoreDoc aSd : sd) {
+      float score = aSd.score;
+      log(s.explain(q, aSd.doc));
+      String id = s.getIndexReader().document(aSd.doc).get(ID_FIELD);
+      float expectedScore = expectedFieldScore(id); // "ID7" --> 7.0
+      assertEquals("score of " + id + " shuould be " + expectedScore + " != " + score, expectedScore, score, TEST_SCORE_TOLERANCE_DELTA);
+    }
+    s.close();
+  }
+
+  /** Test that FieldScoreQuery of Type.BYTE caches/reuses loaded values and consumes the proper RAM resources. */
+  @Test
+  public void testCachingByte () throws  Exception {
+    // INT field values are small enough to be parsed as byte
+    doTestCaching(INT_FIELD,FieldScoreQuery.Type.BYTE);
+  }
+
+  /** Test that FieldScoreQuery of Type.SHORT caches/reuses loaded values and consumes the proper RAM resources. */
+  @Test
+  public void testCachingShort () throws  Exception {
+    // INT field values are small enough to be parsed as short
+    doTestCaching(INT_FIELD,FieldScoreQuery.Type.SHORT);
+  }
+
+  /** Test that FieldScoreQuery of Type.INT caches/reuses loaded values and consumes the proper RAM resources. */
+  @Test
+  public void testCachingInt () throws Exception {
+    doTestCaching(INT_FIELD,FieldScoreQuery.Type.INT);
+  }
+
+  /** Test that FieldScoreQuery of Type.FLOAT caches/reuses loaded values and consumes the proper RAM resources. */
+  @Test
+  public void testCachingFloat () throws  Exception {
+    // INT field values can be parsed as float
+    doTestCaching(INT_FIELD,FieldScoreQuery.Type.FLOAT);
+    // same values, but in float format
+    doTestCaching(FLOAT_FIELD,FieldScoreQuery.Type.FLOAT);
+  }
+
+  // Test that values loaded for FieldScoreQuery are cached properly and consume the proper RAM resources.
+  private void doTestCaching (String field, FieldScoreQuery.Type tp) throws Exception {
+    // prepare expected array types for comparison
+    HashMap<FieldScoreQuery.Type,Object> expectedArrayTypes = new HashMap<FieldScoreQuery.Type,Object>();
+    expectedArrayTypes.put(FieldScoreQuery.Type.BYTE, new byte[0]);
+    expectedArrayTypes.put(FieldScoreQuery.Type.SHORT, new short[0]);
+    expectedArrayTypes.put(FieldScoreQuery.Type.INT, new int[0]);
+    expectedArrayTypes.put(FieldScoreQuery.Type.FLOAT, new float[0]);
+    
+    IndexSearcher s = new IndexSearcher(dir, true);
+    Object[] innerArray = new Object[s.getIndexReader().getSequentialSubReaders().length];
+
+    boolean warned = false; // print warning once.
+    for (int i=0; i<10; i++) {
+      FieldScoreQuery q = new FieldScoreQuery(field,tp);
+      ScoreDoc[] h = s.search(q, null, 1000).scoreDocs;
+      assertEquals("All docs should be matched!",N_DOCS,h.length);
+      IndexReader[] readers = s.getIndexReader().getSequentialSubReaders();
+      for (int j = 0; j < readers.length; j++) {
+        IndexReader reader = readers[j];
+        try {
+          if (i == 0) {
+            innerArray[j] = q.valSrc.getValues(reader).getInnerArray();
+            log(i + ".  compare: " + innerArray[j].getClass() + " to "
+                + expectedArrayTypes.get(tp).getClass());
+            assertEquals(
+                "field values should be cached in the correct array type!",
+                innerArray[j].getClass(), expectedArrayTypes.get(tp).getClass());
+          } else {
+            log(i + ".  compare: " + innerArray[j] + " to "
+                + q.valSrc.getValues(reader).getInnerArray());
+            assertSame("field values should be cached and reused!", innerArray[j],
+                q.valSrc.getValues(reader).getInnerArray());
+          }
+        } catch (UnsupportedOperationException e) {
+          if (!warned) {
+            System.err.println("WARNING: " + testName()
+                + " cannot fully test values of " + q);
+            warned = true;
+          }
+        }
+      }
+    }
+    s.close();
+    // verify new values are reloaded (not reused) for a new reader
+    s = new IndexSearcher(dir, true);
+    FieldScoreQuery q = new FieldScoreQuery(field,tp);
+    ScoreDoc[] h = s.search(q, null, 1000).scoreDocs;
+    assertEquals("All docs should be matched!",N_DOCS,h.length);
+    IndexReader[] readers = s.getIndexReader().getSequentialSubReaders();
+    for (int j = 0; j < readers.length; j++) {
+      IndexReader reader = readers[j];
+      try {
+        log("compare: " + innerArray + " to "
+            + q.valSrc.getValues(reader).getInnerArray());
+        assertNotSame(
+            "cached field values should not be reused if reader as changed!",
+            innerArray, q.valSrc.getValues(reader).getInnerArray());
+      } catch (UnsupportedOperationException e) {
+        if (!warned) {
+          System.err.println("WARNING: " + testName()
+              + " cannot fully test values of " + q);
+          warned = true;
+        }
+      }
+    }
+    s.close();
+  }
+
+  private String testName() {
+    return getClass().getName()+"."+ getName();
+  }
+
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/function/TestOrdValues.java b/lucene/backwards/src/test/org/apache/lucene/search/function/TestOrdValues.java
new file mode 100644
index 0000000..2353c8c
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/function/TestOrdValues.java
@@ -0,0 +1,266 @@
+package org.apache.lucene.search.function;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.index.CorruptIndexException;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.search.*;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/**
+ * Test search based on OrdFieldSource and ReverseOrdFieldSource.
+ * <p/>
+ * Tests here create an index with a few documents, each having
+ * an indexed "id" field.
+ * The ord values of this field are later used for scoring.
+ * <p/>
+ * The order tests verify (via the returned ScoreDocs) that docs are ordered as expected.
+ * <p/>
+ * The exact score tests use TopDocs to verify the exact score.
+ */
+public class TestOrdValues extends FunctionTestSetup {
+
+  @BeforeClass
+  public static void beforeClass() throws Exception {
+    createIndex(false);
+  }
+
+  /**
+   * Test OrdFieldSource
+   */
+  @Test
+  public void testOrdFieldRank() throws CorruptIndexException, Exception {
+    doTestRank(ID_FIELD, true);
+  }
+
+  /**
+   * Test ReverseOrdFieldSource
+   */
+  @Test
+  public void testReverseOrdFieldRank() throws CorruptIndexException, Exception {
+    doTestRank(ID_FIELD, false);
+  }
+
+  // Test that queries based on reverse/ordFieldScore score correctly
+  private void doTestRank(String field, boolean inOrder) throws CorruptIndexException, Exception {
+    IndexSearcher s = new IndexSearcher(dir, true);
+    ValueSource vs;
+    if (inOrder) {
+      vs = new OrdFieldSource(field);
+    } else {
+      vs = new ReverseOrdFieldSource(field);
+    }
+
+    Query q = new ValueSourceQuery(vs);
+    log("test: " + q);
+    QueryUtils.check(random, q, s);
+    ScoreDoc[] h = s.search(q, null, 1000).scoreDocs;
+    assertEquals("All docs should be matched!", N_DOCS, h.length);
+    String prevID = inOrder
+            ? "IE"   // greater than all ids of docs in this test ("ID0001", etc.)
+            : "IC";  // smaller than all ids of docs in this test ("ID0001", etc.)
+
+    for (int i = 0; i < h.length; i++) {
+      String resID = s.doc(h[i].doc).get(ID_FIELD);
+      log(i + ".   score=" + h[i].score + "  -  " + resID);
+      log(s.explain(q, h[i].doc));
+      if (inOrder) {
+        assertTrue("res id " + resID + " should be < prev res id " + prevID, resID.compareTo(prevID) < 0);
+      } else {
+        assertTrue("res id " + resID + " should be > prev res id " + prevID, resID.compareTo(prevID) > 0);
+      }
+      prevID = resID;
+    }
+    s.close();
+  }
+
+  /**
+   * Test exact score for OrdFieldSource
+   */
+  @Test
+  public void testOrdFieldExactScore() throws CorruptIndexException, Exception {
+    doTestExactScore(ID_FIELD, true);
+  }
+
+  /**
+   * Test exact score for ReverseOrdFieldSource
+   */
+  @Test
+  public void testReverseOrdFieldExactScore() throws CorruptIndexException, Exception {
+    doTestExactScore(ID_FIELD, false);
+  }
+
+
+  // Test that queries based on reverse/ordFieldScore returns docs with expected score.
+  private void doTestExactScore(String field, boolean inOrder) throws CorruptIndexException, Exception {
+    IndexSearcher s = new IndexSearcher(dir, true);
+    ValueSource vs;
+    if (inOrder) {
+      vs = new OrdFieldSource(field);
+    } else {
+      vs = new ReverseOrdFieldSource(field);
+    }
+    Query q = new ValueSourceQuery(vs);
+    TopDocs td = s.search(q, null, 1000);
+    assertEquals("All docs should be matched!", N_DOCS, td.totalHits);
+    ScoreDoc sd[] = td.scoreDocs;
+    for (int i = 0; i < sd.length; i++) {
+      float score = sd[i].score;
+      String id = s.getIndexReader().document(sd[i].doc).get(ID_FIELD);
+      log("-------- " + i + ". Explain doc " + id);
+      log(s.explain(q, sd[i].doc));
+      float expectedScore = N_DOCS - i;
+      assertEquals("score of result " + i + " shuould be " + expectedScore + " != " + score, expectedScore, score, TEST_SCORE_TOLERANCE_DELTA);
+      String expectedId = inOrder
+              ? id2String(N_DOCS - i) // in-order ==> larger  values first
+              : id2String(i + 1);     // reverse  ==> smaller values first
+      assertTrue("id of result " + i + " shuould be " + expectedId + " != " + score, expectedId.equals(id));
+    }
+    s.close();
+  }
+
+  /**
+   * Test caching OrdFieldSource
+   */
+  @Test
+  public void testCachingOrd() throws CorruptIndexException, Exception {
+    doTestCaching(ID_FIELD, true);
+  }
+
+  /**
+   * Test caching for ReverseOrdFieldSource
+   */
+  @Test
+  public void testCachingReverseOrd() throws CorruptIndexException, Exception {
+    doTestCaching(ID_FIELD, false);
+  }
+
+  // Test that values loaded for the ord/reverse-ord value sources are cached properly and consume the proper RAM resources.
+  private void doTestCaching(String field, boolean inOrder) throws CorruptIndexException, Exception {
+    IndexSearcher s = new IndexSearcher(dir, true);
+    Object innerArray = null;
+
+    boolean warned = false; // print warning once
+
+    for (int i = 0; i < 10; i++) {
+      ValueSource vs;
+      if (inOrder) {
+        vs = new OrdFieldSource(field);
+      } else {
+        vs = new ReverseOrdFieldSource(field);
+      }
+      ValueSourceQuery q = new ValueSourceQuery(vs);
+      ScoreDoc[] h = s.search(q, null, 1000).scoreDocs;
+      try {
+        assertEquals("All docs should be matched!", N_DOCS, h.length);
+        IndexReader[] readers = s.getIndexReader().getSequentialSubReaders();
+
+        for (IndexReader reader : readers) {
+          if (i == 0) {
+            innerArray = q.valSrc.getValues(reader).getInnerArray();
+          } else {
+            log(i + ".  compare: " + innerArray + " to " + q.valSrc.getValues(reader).getInnerArray());
+            assertSame("field values should be cached and reused!", innerArray, q.valSrc.getValues(reader).getInnerArray());
+          }
+        }
+      } catch (UnsupportedOperationException e) {
+        if (!warned) {
+          System.err.println("WARNING: " + testName() + " cannot fully test values of " + q);
+          warned = true;
+        }
+      }
+    }
+
+    ValueSource vs;
+    ValueSourceQuery q;
+    ScoreDoc[] h;
+
+    // verify that different values are loaded for a different field
+    String field2 = INT_FIELD;
+    assertFalse(field.equals(field2)); // otherwise this test is meaningless.
+    if (inOrder) {
+      vs = new OrdFieldSource(field2);
+    } else {
+      vs = new ReverseOrdFieldSource(field2);
+    }
+    q = new ValueSourceQuery(vs);
+    h = s.search(q, null, 1000).scoreDocs;
+    assertEquals("All docs should be matched!", N_DOCS, h.length);
+    IndexReader[] readers = s.getIndexReader().getSequentialSubReaders();
+
+    for (IndexReader reader : readers) {
+      try {
+        log("compare (should differ): " + innerArray + " to "
+                + q.valSrc.getValues(reader).getInnerArray());
+        assertNotSame(
+                "different values shuold be loaded for a different field!",
+                innerArray, q.valSrc.getValues(reader).getInnerArray());
+      } catch (UnsupportedOperationException e) {
+        if (!warned) {
+          System.err.println("WARNING: " + testName()
+                  + " cannot fully test values of " + q);
+          warned = true;
+        }
+      }
+    }
+    s.close();
+    // verify new values are reloaded (not reused) for a new reader
+    s = new IndexSearcher(dir, true);
+    if (inOrder) {
+      vs = new OrdFieldSource(field);
+    } else {
+      vs = new ReverseOrdFieldSource(field);
+    }
+    q = new ValueSourceQuery(vs);
+    h = s.search(q, null, 1000).scoreDocs;
+    assertEquals("All docs should be matched!", N_DOCS, h.length);
+    readers = s.getIndexReader().getSequentialSubReaders();
+
+    for (IndexReader reader : readers) {
+      try {
+        log("compare (should differ): " + innerArray + " to "
+                + q.valSrc.getValues(reader).getInnerArray());
+        assertNotSame(
+                "cached field values should not be reused if reader as changed!",
+                innerArray, q.valSrc.getValues(reader).getInnerArray());
+      } catch (UnsupportedOperationException e) {
+        if (!warned) {
+          System.err.println("WARNING: " + testName()
+                  + " cannot fully test values of " + q);
+          warned = true;
+        }
+      }
+    }
+    s.close();
+  }
+
+  private String testName() {
+    return getClass().getName() + "." + getName();
+  }
+
+  // LUCENE-1250
+  public void testEqualsNull() throws Exception {
+    OrdFieldSource ofs = new OrdFieldSource("f");
+    assertFalse(ofs.equals(null));
+    
+    ReverseOrdFieldSource rofs = new ReverseOrdFieldSource("f");
+    assertFalse(rofs.equals(null));
+  }
+  
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/payloads/PayloadHelper.java b/lucene/backwards/src/test/org/apache/lucene/search/payloads/PayloadHelper.java
new file mode 100644
index 0000000..91eef1c
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/payloads/PayloadHelper.java
@@ -0,0 +1,139 @@
+package org.apache.lucene.search.payloads;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.io.Reader;
+import java.util.Random;
+
+import org.apache.lucene.analysis.*;
+import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.Payload;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Similarity;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.MockDirectoryWrapper;
+import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.English;
+
+import static org.apache.lucene.util.LuceneTestCase.TEST_VERSION_CURRENT;
+
+/**
+ * Helper for payload tests: builds a small index whose tokens carry
+ * payloads added by the enclosed PayloadAnalyzer.
+ **/
+public class PayloadHelper {
+
+  private byte[] payloadField = new byte[]{1};
+  private byte[] payloadMultiField1 = new byte[]{2};
+  private byte[] payloadMultiField2 = new byte[]{4};
+  public static final String NO_PAYLOAD_FIELD = "noPayloadField";
+  public static final String MULTI_FIELD = "multiField";
+  public static final String FIELD = "field";
+
+  public IndexReader reader;
+
+  public final class PayloadAnalyzer extends Analyzer {
+
+
+    @Override
+    public TokenStream tokenStream(String fieldName, Reader reader) {
+      TokenStream result = new LowerCaseTokenizer(TEST_VERSION_CURRENT, reader);
+      result = new PayloadFilter(result, fieldName);
+      return result;
+    }
+  }
+
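+  // adds payload 1 to every token of FIELD, alternates payloads 2 and 4 on MULTI_FIELD tokens, and leaves other fields without payloads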
+  public final class PayloadFilter extends TokenFilter {
+    private final String fieldName;
+    private int numSeen = 0;
+    private final PayloadAttribute payloadAtt;
+    
+    public PayloadFilter(TokenStream input, String fieldName) {
+      super(input);
+      this.fieldName = fieldName;
+      payloadAtt = addAttribute(PayloadAttribute.class);
+    }
+
+    @Override
+    public boolean incrementToken() throws IOException {
+      
+      if (input.incrementToken()) {
+        if (fieldName.equals(FIELD)) {
+          payloadAtt.setPayload(new Payload(payloadField));
+        } else if (fieldName.equals(MULTI_FIELD)) {
+          if (numSeen  % 2 == 0) {
+            payloadAtt.setPayload(new Payload(payloadMultiField1));
+          }
+          else {
+            payloadAtt.setPayload(new Payload(payloadMultiField2));
+          }
+          numSeen++;
+        }
+        return true;
+      }
+      return false;
+    }
+
+    @Override
+    public void reset() throws IOException {
+      super.reset();
+      this.numSeen = 0;
+    }
+  }
+
+  /**
+   * Sets up a RAMDirectory and adds documents (using English.intToEnglish()) with the fields
+   * field, multiField and noPayloadField, analyzing them with the PayloadAnalyzer.
+   * @param similarity The Similarity class to use in the Searcher
+   * @param numDocs The num docs to add
+   * @return An IndexSearcher
+   * @throws IOException
+   */
+  // TODO: randomize
+  public IndexSearcher setUp(Random random, Similarity similarity, int numDocs) throws IOException {
+    Directory directory = new MockDirectoryWrapper(random, new RAMDirectory());
+    PayloadAnalyzer analyzer = new PayloadAnalyzer();
+    IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(
+        TEST_VERSION_CURRENT, analyzer).setSimilarity(similarity));
+    // writer.infoStream = System.out;
+    for (int i = 0; i < numDocs; i++) {
+      Document doc = new Document();
+      doc.add(new Field(FIELD, English.intToEnglish(i), Field.Store.YES, Field.Index.ANALYZED));
+      doc.add(new Field(MULTI_FIELD, English.intToEnglish(i) + "  " + English.intToEnglish(i), Field.Store.YES, Field.Index.ANALYZED));
+      doc.add(new Field(NO_PAYLOAD_FIELD, English.intToEnglish(i), Field.Store.YES, Field.Index.ANALYZED));
+      writer.addDocument(doc);
+    }
+    reader = IndexReader.open(writer, true);
+    writer.close();
+
+    IndexSearcher searcher = LuceneTestCase.newSearcher(reader);
+    searcher.setSimilarity(similarity);
+    return searcher;
+  }
+
+  public void tearDown() throws Exception {
+    reader.close();
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/payloads/TestPayloadNearQuery.java b/lucene/backwards/src/test/org/apache/lucene/search/payloads/TestPayloadNearQuery.java
new file mode 100644
index 0000000..65d088c
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/payloads/TestPayloadNearQuery.java
@@ -0,0 +1,351 @@
+package org.apache.lucene.search.payloads;
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import java.io.IOException;
+import java.io.Reader;
+import java.util.Collection;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.LowerCaseTokenizer;
+import org.apache.lucene.analysis.TokenFilter;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.FieldInvertState;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.Payload;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.DefaultSimilarity;
+import org.apache.lucene.search.Explanation;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.QueryUtils;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.Searcher;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.search.spans.SpanQuery;
+import org.apache.lucene.search.spans.SpanNearQuery;
+import org.apache.lucene.search.spans.SpanTermQuery;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.English;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.search.Explanation.IDFExplanation;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+
+
+public class TestPayloadNearQuery extends LuceneTestCase {
+  private static IndexSearcher searcher;
+  private static IndexReader reader;
+  private static Directory directory;
+  private static BoostingSimilarity similarity = new BoostingSimilarity();
+  private static byte[] payload2 = new byte[]{2};
+  private static byte[] payload4 = new byte[]{4};
+
+  private static class PayloadAnalyzer extends Analyzer {
+    @Override
+    public TokenStream tokenStream(String fieldName, Reader reader) {
+      TokenStream result = new LowerCaseTokenizer(TEST_VERSION_CURRENT, reader);
+      result = new PayloadFilter(result, fieldName);
+      return result;
+    }
+  }
+
+  private static class PayloadFilter extends TokenFilter {
+    private final String fieldName;
+    private int numSeen = 0;
+    private final PayloadAttribute payAtt;
+
+    public PayloadFilter(TokenStream input, String fieldName) {
+      super(input);
+      this.fieldName = fieldName;
+      payAtt = addAttribute(PayloadAttribute.class);
+    }
+
+    @Override
+    public boolean incrementToken() throws IOException {
+      boolean result = false;
+      if (input.incrementToken()) {
+        if (numSeen % 2 == 0) {
+          payAtt.setPayload(new Payload(payload2));
+        } else {
+          payAtt.setPayload(new Payload(payload4));
+        }
+        numSeen++;
+        result = true;
+      }
+      return result;
+    }
+
+    @Override
+    public void reset() throws IOException {
+      super.reset();
+      this.numSeen = 0;
+    }
+  }
+  
+  private PayloadNearQuery newPhraseQuery(String fieldName, String phrase, boolean inOrder, PayloadFunction function) {
+    String[] words = phrase.split("[\\s]+");
+    SpanQuery clauses[] = new SpanQuery[words.length];
+    for (int i=0;i<clauses.length;i++) {
+      clauses[i] = new SpanTermQuery(new Term(fieldName, words[i]));  
+    } 
+    return new PayloadNearQuery(clauses, 0, inOrder, function);
+  }
+
+  @BeforeClass
+  public static void beforeClass() throws Exception {
+    directory = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random, directory, 
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new PayloadAnalyzer())
+        .setSimilarity(similarity));
+    //writer.infoStream = System.out;
+    for (int i = 0; i < 1000; i++) {
+      Document doc = new Document();
+      doc.add(newField("field", English.intToEnglish(i), Field.Store.YES, Field.Index.ANALYZED));
+      String txt = English.intToEnglish(i) +' '+English.intToEnglish(i+1);
+      doc.add(newField("field2",  txt, Field.Store.YES, Field.Index.ANALYZED));
+      writer.addDocument(doc);
+    }
+    reader = writer.getReader();
+    writer.close();
+
+    searcher = newSearcher(reader);
+    searcher.setSimilarity(similarity);
+  }
+
+  @AfterClass
+  public static void afterClass() throws Exception {
+    searcher.close();
+    searcher = null;
+    reader.close();
+    reader = null;
+    directory.close();
+    directory = null;
+  }
+
+  public void test() throws IOException {
+    PayloadNearQuery query;
+    TopDocs hits;
+
+    query = newPhraseQuery("field", "twenty two", true, new AveragePayloadFunction());
+    QueryUtils.check(query);
+		
+    // all 10 hits should have score = 3 because adjacent terms have payloads of 2,4
+    // and all the similarity factors are set to 1
+    hits = searcher.search(query, null, 100);
+    assertTrue("hits is null and it shouldn't be", hits != null);
+    assertTrue("should be 10 hits", hits.totalHits == 10);
+    for (int j = 0; j < hits.scoreDocs.length; j++) {
+      ScoreDoc doc = hits.scoreDocs[j];
+      assertTrue(doc.score + " does not equal: " + 3, doc.score == 3);
+    }
+    for (int i=1;i<10;i++) {
+      query = newPhraseQuery("field", English.intToEnglish(i)+" hundred", true, new AveragePayloadFunction());
+      // all should have score = 3 because adjacent terms have payloads of 2,4
+      // and all the similarity factors are set to 1
+      hits = searcher.search(query, null, 100);
+      assertTrue("hits is null and it shouldn't be", hits != null);
+      assertTrue("should be 100 hits", hits.totalHits == 100);
+      for (int j = 0; j < hits.scoreDocs.length; j++) {
+        ScoreDoc doc = hits.scoreDocs[j];
+        //				System.out.println("Doc: " + doc.toString());
+        //				System.out.println("Explain: " + searcher.explain(query, doc.doc));
+        assertTrue(doc.score + " does not equal: " + 3, doc.score == 3);
+      }
+    }
+  }
+
+
+  public void testPayloadNear() throws IOException {
+    SpanNearQuery q1, q2;
+    PayloadNearQuery query;
+    //SpanNearQuery(clauses, 10000, false)
+    q1 = spanNearQuery("field2", "twenty two");
+    q2 = spanNearQuery("field2", "twenty three");
+    SpanQuery[] clauses = new SpanQuery[2];
+    clauses[0] = q1;
+    clauses[1] = q2;
+    query = new PayloadNearQuery(clauses, 10, false); 
+    //System.out.println(query.toString());
+    assertEquals(12, searcher.search(query, null, 100).totalHits);
+    /*
+    System.out.println(hits.totalHits);
+    for (int j = 0; j < hits.scoreDocs.length; j++) {
+      ScoreDoc doc = hits.scoreDocs[j];
+      System.out.println("doc: "+doc.doc+", score: "+doc.score);
+    }
+    */
+  }
+  
+  public void testAverageFunction() throws IOException {
+    PayloadNearQuery query;
+    TopDocs hits;
+
+    query = newPhraseQuery("field", "twenty two", true, new AveragePayloadFunction());
+    QueryUtils.check(query);
+    // all 10 hits should have score = 3 because adjacent terms have payloads of 2,4
+    // and all the similarity factors are set to 1
+    hits = searcher.search(query, null, 100);
+    assertTrue("hits is null and it shouldn't be", hits != null);
+    assertTrue("should be 10 hits", hits.totalHits == 10);
+    for (int j = 0; j < hits.scoreDocs.length; j++) {
+      ScoreDoc doc = hits.scoreDocs[j];
+      assertTrue(doc.score + " does not equal: " + 3, doc.score == 3);
+      Explanation explain = searcher.explain(query, hits.scoreDocs[j].doc);
+      String exp = explain.toString();
+      assertTrue(exp, exp.indexOf("AveragePayloadFunction") > -1);
+      assertTrue(hits.scoreDocs[j].score + " explain value does not equal: " + 3, explain.getValue() == 3f);
+    }
+  }
+  public void testMaxFunction() throws IOException {
+    PayloadNearQuery query;
+    TopDocs hits;
+
+    query = newPhraseQuery("field", "twenty two", true, new MaxPayloadFunction());
+    QueryUtils.check(query);
+    // all 10 hits should have score = 4 (max payload value)
+    hits = searcher.search(query, null, 100);
+    assertTrue("hits is null and it shouldn't be", hits != null);
+    assertTrue("should be 10 hits", hits.totalHits == 10);
+    for (int j = 0; j < hits.scoreDocs.length; j++) {
+      ScoreDoc doc = hits.scoreDocs[j];
+      assertTrue(doc.score + " does not equal: " + 4, doc.score == 4);
+      Explanation explain = searcher.explain(query, hits.scoreDocs[j].doc);
+      String exp = explain.toString();
+      assertTrue(exp, exp.indexOf("MaxPayloadFunction") > -1);
+      assertTrue(hits.scoreDocs[j].score + " explain value does not equal: " + 4, explain.getValue() == 4f);
+    }
+  }
+  public void testMinFunction() throws IOException {
+    PayloadNearQuery query;
+    TopDocs hits;
+
+    query = newPhraseQuery("field", "twenty two", true, new MinPayloadFunction());
+    QueryUtils.check(query);
+    // all 10 hits should have score = 2 (min payload value)
+    hits = searcher.search(query, null, 100);
+    assertTrue("hits is null and it shouldn't be", hits != null);
+    assertTrue("should be 10 hits", hits.totalHits == 10);
+    for (int j = 0; j < hits.scoreDocs.length; j++) {
+      ScoreDoc doc = hits.scoreDocs[j];
+      assertTrue(doc.score + " does not equal: " + 2, doc.score == 2);
+      Explanation explain = searcher.explain(query, hits.scoreDocs[j].doc);
+      String exp = explain.toString();
+      assertTrue(exp, exp.indexOf("MinPayloadFunction") > -1);
+      assertTrue(hits.scoreDocs[j].score + " explain value does not equal: " + 2, explain.getValue() == 2f);
+    }
+  }
+  private SpanQuery[] getClauses() {
+    SpanNearQuery q1, q2;
+    q1 = spanNearQuery("field2", "twenty two");
+    q2 = spanNearQuery("field2", "twenty three");
+    SpanQuery[] clauses = new SpanQuery[2];
+    clauses[0] = q1;
+    clauses[1] = q2;
+    return clauses;
+  }
+  private SpanNearQuery spanNearQuery(String fieldName, String words) {
+    String[] wordList = words.split("[\\s]+");
+    SpanQuery clauses[] = new SpanQuery[wordList.length];
+    for (int i=0;i<clauses.length;i++) {
+      clauses[i] = new PayloadTermQuery(new Term(fieldName, wordList[i]), new AveragePayloadFunction());  
+    } 
+    return new SpanNearQuery(clauses, 10000, false);
+  }
+
+  public void testLongerSpan() throws IOException {
+    PayloadNearQuery query;
+    TopDocs hits;
+    query = newPhraseQuery("field", "nine hundred ninety nine", true, new AveragePayloadFunction());
+    hits = searcher.search(query, null, 100);
+    assertTrue("hits is null and it shouldn't be", hits != null);
+    ScoreDoc doc = hits.scoreDocs[0];
+    //		System.out.println("Doc: " + doc.toString());
+    //		System.out.println("Explain: " + searcher.explain(query, doc.doc));
+    assertTrue("there should only be one hit", hits.totalHits == 1);
+    // should have score = 3 because adjacent terms have payloads of 2,4
+    assertTrue(doc.score + " does not equal: " + 3, doc.score == 3); 
+  }
+
+  public void testComplexNested() throws IOException {
+    PayloadNearQuery query;
+    TopDocs hits;
+
+    // combine ordered and unordered spans with some nesting to make sure all payloads are counted
+
+    SpanQuery q1 = newPhraseQuery("field", "nine hundred", true, new AveragePayloadFunction());
+    SpanQuery q2 = newPhraseQuery("field", "ninety nine", true, new AveragePayloadFunction());
+    SpanQuery q3 = newPhraseQuery("field", "nine ninety", false, new AveragePayloadFunction());
+    SpanQuery q4 = newPhraseQuery("field", "hundred nine", false, new AveragePayloadFunction());
+    SpanQuery[]clauses = new SpanQuery[] {new PayloadNearQuery(new SpanQuery[] {q1,q2}, 0, true), new PayloadNearQuery(new SpanQuery[] {q3,q4}, 0, false)};
+    query = new PayloadNearQuery(clauses, 0, false);
+    hits = searcher.search(query, null, 100);
+    assertTrue("hits is null and it shouldn't be", hits != null);
+    // should be only 1 hit - doc 999
+    assertTrue("should only be one hit", hits.scoreDocs.length == 1);
+    // the score should be 3 - the average of all the underlying payloads
+    ScoreDoc doc = hits.scoreDocs[0];
+    //		System.out.println("Doc: " + doc.toString());
+    //		System.out.println("Explain: " + searcher.explain(query, doc.doc));
+    assertTrue(doc.score + " does not equal: " + 3, doc.score == 3);  
+  }
+
+  // must be static for weight serialization tests 
+  static class BoostingSimilarity extends DefaultSimilarity {
+
+    @Override public float scorePayload(int docId, String fieldName, int start, int end, byte[] payload, int offset, int length) {
+      // the payload is a single byte here, so the offset/length can be ignored
+      return payload[0];
+    }
+    //!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+    //Make everything else 1 so we see the effect of the payload
+    //!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+    @Override public float computeNorm(String fieldName, FieldInvertState state) {
+      return state.getBoost();
+    }
+
+    @Override public float queryNorm(float sumOfSquaredWeights) {
+      return 1.0f;
+    }
+
+    @Override public float sloppyFreq(int distance) {
+      return 1.0f;
+    }
+
+    @Override public float coord(int overlap, int maxOverlap) {
+      return 1.0f;
+    }
+    @Override public float tf(float freq) {
+      return 1.0f;
+    }
+    // idf used for phrase queries
+    @Override public IDFExplanation idfExplain(Collection<Term> terms, Searcher searcher) throws IOException {
+      return new IDFExplanation() {
+        @Override
+        public float getIdf() {
+          return 1.0f;
+        }
+        @Override
+        public String explain() {
+          return "Inexplicable";
+        }
+      };
+    }
+  }
+}
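The expected scores asserted above (3, 4 and 2) follow directly from how the three PayloadFunction implementations combine the per-term payloads 2 and 4 once BoostingSimilarity pins every other scoring factor to 1. A minimal sketch of that arithmetic, written as plain Java outside the patched test code (the class name is illustrative only):

public class PayloadScoreArithmetic {
  public static void main(String[] args) {
    int[] payloads = {2, 4};   // adjacent terms carry payloads of 2 and 4

    double sum = 0;
    int max = Integer.MIN_VALUE;
    int min = Integer.MAX_VALUE;
    for (int p : payloads) {
      sum += p;
      max = Math.max(max, p);
      min = Math.min(min, p);
    }

    // With every other similarity factor forced to 1, these are the document scores:
    System.out.println("AveragePayloadFunction -> " + (sum / payloads.length)); // 3.0
    System.out.println("MaxPayloadFunction     -> " + max);                     // 4
    System.out.println("MinPayloadFunction     -> " + min);                     // 2
  }
}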
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/payloads/TestPayloadTermQuery.java b/lucene/backwards/src/test/org/apache/lucene/search/payloads/TestPayloadTermQuery.java
new file mode 100644
index 0000000..eafd74e
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/payloads/TestPayloadTermQuery.java
@@ -0,0 +1,345 @@
+package org.apache.lucene.search.payloads;
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.English;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.QueryUtils;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.CheckHits;
+import org.apache.lucene.search.BooleanClause;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.DefaultSimilarity;
+import org.apache.lucene.search.spans.SpanTermQuery;
+import org.apache.lucene.search.spans.Spans;
+import org.apache.lucene.search.spans.TermSpans;
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.LowerCaseTokenizer;
+import org.apache.lucene.analysis.TokenFilter;
+import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
+import org.apache.lucene.index.FieldInvertState;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.Payload;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+
+import java.io.Reader;
+import java.io.IOException;
+
+
+/**
+ * Tests payload-based scoring with {@link PayloadTermQuery}, using a similarity
+ * that pins every non-payload factor to 1.
+ **/
+public class TestPayloadTermQuery extends LuceneTestCase {
+  private IndexSearcher searcher;
+  private IndexReader reader;
+  private BoostingSimilarity similarity = new BoostingSimilarity();
+  private byte[] payloadField = new byte[]{1};
+  private byte[] payloadMultiField1 = new byte[]{2};
+  private byte[] payloadMultiField2 = new byte[]{4};
+  protected Directory directory;
+
+  private class PayloadAnalyzer extends Analyzer {
+
+
+    @Override
+    public TokenStream tokenStream(String fieldName, Reader reader) {
+      TokenStream result = new LowerCaseTokenizer(TEST_VERSION_CURRENT, reader);
+      result = new PayloadFilter(result, fieldName);
+      return result;
+    }
+  }
+
+  private class PayloadFilter extends TokenFilter {
+    private final String fieldName;
+    private int numSeen = 0;
+    
+    private final PayloadAttribute payloadAtt;
+    
+    public PayloadFilter(TokenStream input, String fieldName) {
+      super(input);
+      this.fieldName = fieldName;
+      payloadAtt = addAttribute(PayloadAttribute.class);
+    }
+    
+    @Override
+    public boolean incrementToken() throws IOException {
+      boolean hasNext = input.incrementToken();
+      if (hasNext) {
+        if (fieldName.equals("field")) {
+          payloadAtt.setPayload(new Payload(payloadField));
+        } else if (fieldName.equals("multiField")) {
+          if (numSeen % 2 == 0) {
+            payloadAtt.setPayload(new Payload(payloadMultiField1));
+          } else {
+            payloadAtt.setPayload(new Payload(payloadMultiField2));
+          }
+          numSeen++;
+        }
+        return true;
+      } else {
+        return false;
+      }
+    }
+
+    @Override
+    public void reset() throws IOException {
+      super.reset();
+      this.numSeen = 0;
+    }
+  }
+
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    directory = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random, directory, 
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new PayloadAnalyzer())
+        .setSimilarity(similarity).setMergePolicy(newLogMergePolicy()));
+    //writer.infoStream = System.out;
+    for (int i = 0; i < 1000; i++) {
+      Document doc = new Document();
+      Field noPayloadField = newField(PayloadHelper.NO_PAYLOAD_FIELD, English.intToEnglish(i), Field.Store.YES, Field.Index.ANALYZED);
+      //noPayloadField.setBoost(0);
+      doc.add(noPayloadField);
+      doc.add(newField("field", English.intToEnglish(i), Field.Store.YES, Field.Index.ANALYZED));
+      doc.add(newField("multiField", English.intToEnglish(i) + "  " + English.intToEnglish(i), Field.Store.YES, Field.Index.ANALYZED));
+      writer.addDocument(doc);
+    }
+    reader = writer.getReader();
+    writer.close();
+
+    searcher = newSearcher(reader);
+    searcher.setSimilarity(similarity);
+  }
+
+  @Override
+  public void tearDown() throws Exception {
+    searcher.close();
+    reader.close();
+    directory.close();
+    super.tearDown();
+  }
+
+  public void test() throws IOException {
+    PayloadTermQuery query = new PayloadTermQuery(new Term("field", "seventy"),
+            new MaxPayloadFunction());
+    TopDocs hits = searcher.search(query, null, 100);
+    assertTrue("hits is null and it shouldn't be", hits != null);
+    assertTrue("hits Size: " + hits.totalHits + " is not: " + 100, hits.totalHits == 100);
+
+    //they should all have the exact same score, because they all contain seventy once, and we set
+    //all the other similarity factors to be 1
+
+    assertTrue(hits.getMaxScore() + " does not equal: " + 1, hits.getMaxScore() == 1);
+    for (int i = 0; i < hits.scoreDocs.length; i++) {
+      ScoreDoc doc = hits.scoreDocs[i];
+      assertTrue(doc.score + " does not equal: " + 1, doc.score == 1);
+    }
+    CheckHits.checkExplanations(query, PayloadHelper.FIELD, searcher, true);
+    Spans spans = query.getSpans(searcher.getIndexReader());
+    assertTrue("spans is null and it shouldn't be", spans != null);
+    assertTrue("spans is not an instanceof " + TermSpans.class, spans instanceof TermSpans);
+    /*float score = hits.score(0);
+    for (int i =1; i < hits.length(); i++)
+    {
+      assertTrue("scores are not equal and they should be", score == hits.score(i));
+    }*/
+
+  }
+  
+  public void testQuery() {
+    PayloadTermQuery boostingFuncTermQuery = new PayloadTermQuery(new Term(PayloadHelper.MULTI_FIELD, "seventy"),
+        new MaxPayloadFunction());
+    QueryUtils.check(boostingFuncTermQuery);
+    
+    SpanTermQuery spanTermQuery = new SpanTermQuery(new Term(PayloadHelper.MULTI_FIELD, "seventy"));
+
+    assertTrue(boostingFuncTermQuery.equals(spanTermQuery) == spanTermQuery.equals(boostingFuncTermQuery));
+    
+    PayloadTermQuery boostingFuncTermQuery2 = new PayloadTermQuery(new Term(PayloadHelper.MULTI_FIELD, "seventy"),
+        new AveragePayloadFunction());
+    
+    QueryUtils.checkUnequal(boostingFuncTermQuery, boostingFuncTermQuery2);
+  }
+
+  public void testMultipleMatchesPerDoc() throws Exception {
+    PayloadTermQuery query = new PayloadTermQuery(new Term(PayloadHelper.MULTI_FIELD, "seventy"),
+            new MaxPayloadFunction());
+    TopDocs hits = searcher.search(query, null, 100);
+    assertTrue("hits is null and it shouldn't be", hits != null);
+    assertTrue("hits Size: " + hits.totalHits + " is not: " + 100, hits.totalHits == 100);
+
+    //they should all have the exact same score, because they all contain seventy once, and we set
+    //all the other similarity factors to be 1
+
+    //System.out.println("Hash: " + seventyHash + " Twice Hash: " + 2*seventyHash);
+    assertTrue(hits.getMaxScore() + " does not equal: " + 4.0, hits.getMaxScore() == 4.0);
+    //there should be exactly 10 items that score a 4, all the rest should score a 2
+    //The 10 items are: 70 + i*100 where i in [0-9]
+    int numTens = 0;
+    for (int i = 0; i < hits.scoreDocs.length; i++) {
+      ScoreDoc doc = hits.scoreDocs[i];
+      if (doc.doc % 10 == 0) {
+        numTens++;
+        assertTrue(doc.score + " does not equal: " + 4.0, doc.score == 4.0);
+      } else {
+        assertTrue(doc.score + " does not equal: " + 2, doc.score == 2);
+      }
+    }
+    assertTrue(numTens + " does not equal: " + 10, numTens == 10);
+    CheckHits.checkExplanations(query, "field", searcher, true);
+    Spans spans = query.getSpans(searcher.getIndexReader());
+    assertTrue("spans is null and it shouldn't be", spans != null);
+    assertTrue("spans is not an instanceof " + TermSpans.class, spans instanceof TermSpans);
+    //should be two matches per document
+    int count = 0;
+    //100 hits times 2 matches per hit, we should have 200 in count
+    while (spans.next()) {
+      count++;
+    }
+    assertTrue(count + " does not equal: " + 200, count == 200);
+  }
+
+  //Set includeSpanScore to false, in which case just the payload score comes through.
+  public void testIgnoreSpanScorer() throws Exception {
+    PayloadTermQuery query = new PayloadTermQuery(new Term(PayloadHelper.MULTI_FIELD, "seventy"),
+            new MaxPayloadFunction(), false);
+
+    IndexSearcher theSearcher = new IndexSearcher(directory, true);
+    theSearcher.setSimilarity(new FullSimilarity());
+    TopDocs hits = searcher.search(query, null, 100);
+    assertTrue("hits is null and it shouldn't be", hits != null);
+    assertTrue("hits Size: " + hits.totalHits + " is not: " + 100, hits.totalHits == 100);
+
+    //they should all have the exact same score, because they all contain seventy once, and we set
+    //all the other similarity factors to be 1
+
+    //System.out.println("Hash: " + seventyHash + " Twice Hash: " + 2*seventyHash);
+    assertTrue(hits.getMaxScore() + " does not equal: " + 4.0, hits.getMaxScore() == 4.0);
+    //there should be exactly 10 items that score a 4, all the rest should score a 2
+    //The 10 items are: 70 + i*100 where i in [0-9]
+    int numTens = 0;
+    for (int i = 0; i < hits.scoreDocs.length; i++) {
+      ScoreDoc doc = hits.scoreDocs[i];
+      if (doc.doc % 10 == 0) {
+        numTens++;
+        assertTrue(doc.score + " does not equal: " + 4.0, doc.score == 4.0);
+      } else {
+        assertTrue(doc.score + " does not equal: " + 2, doc.score == 2);
+      }
+    }
+    assertTrue(numTens + " does not equal: " + 10, numTens == 10);
+    CheckHits.checkExplanations(query, "field", searcher, true);
+    Spans spans = query.getSpans(searcher.getIndexReader());
+    assertTrue("spans is null and it shouldn't be", spans != null);
+    assertTrue("spans is not an instanceof " + TermSpans.class, spans instanceof TermSpans);
+    //should be two matches per document
+    int count = 0;
+    //100 hits times 2 matches per hit, we should have 200 in count
+    while (spans.next()) {
+      count++;
+    }
+    theSearcher.close();
+  }
+
+  public void testNoMatch() throws Exception {
+    PayloadTermQuery query = new PayloadTermQuery(new Term(PayloadHelper.FIELD, "junk"),
+            new MaxPayloadFunction());
+    TopDocs hits = searcher.search(query, null, 100);
+    assertTrue("hits is null and it shouldn't be", hits != null);
+    assertTrue("hits Size: " + hits.totalHits + " is not: " + 0, hits.totalHits == 0);
+
+  }
+
+  public void testNoPayload() throws Exception {
+    PayloadTermQuery q1 = new PayloadTermQuery(new Term(PayloadHelper.NO_PAYLOAD_FIELD, "zero"),
+            new MaxPayloadFunction());
+    PayloadTermQuery q2 = new PayloadTermQuery(new Term(PayloadHelper.NO_PAYLOAD_FIELD, "foo"),
+            new MaxPayloadFunction());
+    BooleanClause c1 = new BooleanClause(q1, BooleanClause.Occur.MUST);
+    BooleanClause c2 = new BooleanClause(q2, BooleanClause.Occur.MUST_NOT);
+    BooleanQuery query = new BooleanQuery();
+    query.add(c1);
+    query.add(c2);
+    TopDocs hits = searcher.search(query, null, 100);
+    assertTrue("hits is null and it shouldn't be", hits != null);
+    assertTrue("hits Size: " + hits.totalHits + " is not: " + 1, hits.totalHits == 1);
+    int[] results = new int[1];
+    results[0] = 0;//hits.scoreDocs[0].doc;
+    CheckHits.checkHitCollector(random, query, PayloadHelper.NO_PAYLOAD_FIELD, searcher, results);
+  }
+
+  // must be static for weight serialization tests 
+  static class BoostingSimilarity extends DefaultSimilarity {
+
+    // TODO: Remove warning after API has been finalized
+    @Override
+    public float scorePayload(int docId, String fieldName, int start, int end, byte[] payload, int offset, int length) {
+      // the payload is a single byte here, so the offset/length can be ignored
+      return payload[0];
+    }
+
+    //!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+    //Make everything else 1 so we see the effect of the payload
+    //!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+    @Override
+    public float computeNorm(String fieldName, FieldInvertState state) {
+      return state.getBoost();
+    }
+
+    @Override
+    public float queryNorm(float sumOfSquaredWeights) {
+      return 1;
+    }
+
+    @Override
+    public float sloppyFreq(int distance) {
+      return 1;
+    }
+
+    @Override
+    public float coord(int overlap, int maxOverlap) {
+      return 1;
+    }
+
+    @Override
+    public float idf(int docFreq, int numDocs) {
+      return 1;
+    }
+
+    @Override
+    public float tf(float freq) {
+      return freq == 0 ? 0 : 1;
+    }
+  }
+
+  static class FullSimilarity extends DefaultSimilarity{
+    public float scorePayload(int docId, String fieldName, byte[] payload, int offset, int length) {
+      //we know it is size 4 here, so ignore the offset/length
+      return payload[0];
+    }
+  }
+
+}
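The 4-versus-2 split asserted in testMultipleMatchesPerDoc comes from the alternating 2/4 payloads that PayloadFilter assigns across the doubled multiField text: only for the ten documents whose English rendition has an odd number of tokens (70, 170, ..., 970) does the repeated copy start at an odd position, so one of its two "seventy" tokens picks up payload 4 and MaxPayloadFunction returns 4; every other matching document only ever sees payload 2 on "seventy". A sketch of that bookkeeping, separate from the patch itself; it assumes only that org.apache.lucene.util.English from this test framework is on the classpath, and the class name is illustrative:

import org.apache.lucene.util.English;

public class SeventyPayloadParity {
  public static void main(String[] args) {
    for (int i = 0; i < 1000; i++) {
      // the same doubled text that setUp() indexes into the multiField
      String text = English.intToEnglish(i) + "  " + English.intToEnglish(i);
      int position = 0;
      int maxPayload = 0;
      // LowerCaseTokenizer keeps runs of letters only; approximate it with a letter split
      for (String token : text.toLowerCase().split("[^a-z]+")) {
        if (token.length() == 0) {
          continue;
        }
        int payload = (position % 2 == 0) ? 2 : 4;   // PayloadFilter's even/odd alternation
        if (token.equals("seventy")) {
          maxPayload = Math.max(maxPayload, payload);
        }
        position++;
      }
      if (maxPayload == 4) {
        System.out.println(i);   // prints 70, 170, 270, ..., 970
      }
    }
  }
}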
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/spans/JustCompileSearchSpans.java b/lucene/backwards/src/test/org/apache/lucene/search/spans/JustCompileSearchSpans.java
new file mode 100644
index 0000000..c687faf
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/spans/JustCompileSearchSpans.java
@@ -0,0 +1,147 @@
+package org.apache.lucene.search.spans;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.Collection;
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.search.Weight;
+import org.apache.lucene.search.Similarity;
+
+/**
+ * Holds all implementations of classes in the o.a.l.s.spans package as a
+ * back-compatibility test. It does not run any tests per se; however, if
+ * someone adds a method to an interface or an abstract method to an abstract
+ * class, one of the implementations here will fail to compile, and so we know
+ * the back-compat policy was violated.
+ */
+final class JustCompileSearchSpans {
+
+  private static final String UNSUPPORTED_MSG = "unsupported: used for back-compat testing only !";
+
+  static final class JustCompileSpans extends Spans {
+
+    @Override
+    public int doc() {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+
+    @Override
+    public int end() {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+
+    @Override
+    public boolean next() throws IOException {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+
+    @Override
+    public boolean skipTo(int target) throws IOException {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+
+    @Override
+    public int start() {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+
+    @Override
+    public Collection<byte[]> getPayload() throws IOException {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+
+    @Override
+    public boolean isPayloadAvailable() {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+    
+  }
+
+  static final class JustCompileSpanQuery extends SpanQuery {
+
+    @Override
+    public String getField() {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+
+    @Override
+    public Spans getSpans(IndexReader reader) throws IOException {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+
+    @Override
+    public String toString(String field) {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+    
+  }
+
+  static final class JustCompilePayloadSpans extends Spans {
+
+    @Override
+    public Collection<byte[]> getPayload() throws IOException {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+
+    @Override
+    public boolean isPayloadAvailable() {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+
+    @Override
+    public int doc() {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+
+    @Override
+    public int end() {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+
+    @Override
+    public boolean next() throws IOException {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+
+    @Override
+    public boolean skipTo(int target) throws IOException {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+
+    @Override
+    public int start() {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+    
+  }
+  
+  static final class JustCompileSpanScorer extends SpanScorer {
+
+    protected JustCompileSpanScorer(Spans spans, Weight weight,
+        Similarity similarity, byte[] norms) throws IOException {
+      super(spans, weight, similarity, norms);
+    }
+
+    @Override
+    protected boolean setFreqCurrentDoc() throws IOException {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+  }
+}
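JustCompileSearchSpans relies on the compiler rather than on assertions: because each nested class overrides every abstract member of its base type, adding a new abstract method to Spans, SpanQuery or SpanScorer in a later release makes the backwards build stop compiling, which is the intended signal. A stripped-down sketch of the same pattern against a hypothetical base class (the names below are illustrative and not Lucene API):

abstract class ExampleBase {
  abstract int size();
}

final class JustCompileExample extends ExampleBase {
  private static final String UNSUPPORTED_MSG = "unsupported: used for back-compat testing only !";

  // Overrides every abstract member of ExampleBase. If ExampleBase later gains another
  // abstract method, this class no longer compiles and the break is caught at build time.
  @Override
  int size() {
    throw new UnsupportedOperationException(UNSUPPORTED_MSG);
  }
}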
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/spans/TestBasics.java b/lucene/backwards/src/test/org/apache/lucene/search/spans/TestBasics.java
new file mode 100644
index 0000000..a8a86f6
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/spans/TestBasics.java
@@ -0,0 +1,632 @@
+package org.apache.lucene.search.spans;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.io.Reader;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.LowerCaseTokenizer;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.TokenFilter;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.Payload;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.BooleanClause;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.CheckHits;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.PhraseQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.QueryUtils;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.English;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util._TestUtil;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/**
+ * Tests basic search capabilities.
+ *
+ * <p>Uses a collection of 2000 documents, each containing the English rendition of its
+ * document number.  For example, the document numbered 333 has text "three
+ * hundred thirty three".
+ *
+ * <p>Each test is a single query, and its hits are checked to ensure that
+ * all and only the correct documents are returned, thus providing end-to-end
+ * testing of the indexing and search code.
+ *
+ */
+public class TestBasics extends LuceneTestCase {
+  private static IndexSearcher searcher;
+  private static IndexReader reader;
+  private static Directory directory;
+
+  static final class SimplePayloadFilter extends TokenFilter {
+    String fieldName;
+    int pos;
+    final PayloadAttribute payloadAttr;
+    final CharTermAttribute termAttr;
+
+    public SimplePayloadFilter(TokenStream input, String fieldName) {
+      super(input);
+      this.fieldName = fieldName;
+      pos = 0;
+      payloadAttr = input.addAttribute(PayloadAttribute.class);
+      termAttr = input.addAttribute(CharTermAttribute.class);
+    }
+
+    @Override
+    public boolean incrementToken() throws IOException {
+      if (input.incrementToken()) {
+        payloadAttr.setPayload(new Payload(("pos: " + pos).getBytes()));
+        pos++;
+        return true;
+      } else {
+        return false;
+      }
+    }
+
+    @Override
+    public void reset() throws IOException {
+      super.reset();
+      pos = 0;
+    }
+  }
+  
+  static final Analyzer simplePayloadAnalyzer = new Analyzer() {
+
+    @Override
+    public TokenStream tokenStream(String fieldName, Reader reader) {
+      return new SimplePayloadFilter(new LowerCaseTokenizer(TEST_VERSION_CURRENT, reader), fieldName);
+    }
+    
+  };
+  
+  @BeforeClass
+  public static void beforeClass() throws Exception {
+    directory = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random, directory,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, simplePayloadAnalyzer)
+            .setMaxBufferedDocs(_TestUtil.nextInt(random, 50, 1000)).setMergePolicy(newLogMergePolicy()));
+    //writer.infoStream = System.out;
+    for (int i = 0; i < 2000; i++) {
+      Document doc = new Document();
+      doc.add(newField("field", English.intToEnglish(i), Field.Store.YES, Field.Index.ANALYZED));
+      writer.addDocument(doc);
+    }
+    reader = writer.getReader();
+    searcher = newSearcher(reader);
+    writer.close();
+  }
+
+  @AfterClass
+  public static void afterClass() throws Exception {
+    searcher.close();
+    reader.close();
+    directory.close();
+    searcher = null;
+    reader = null;
+    directory = null;
+  }
+
+  @Test
+  public void testTerm() throws Exception {
+    Query query = new TermQuery(new Term("field", "seventy"));
+    checkHits(query, new int[]
+      {70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 170, 171, 172, 173, 174, 175,
+              176, 177, 178, 179, 270, 271, 272, 273, 274, 275, 276, 277, 278,
+              279, 370, 371, 372, 373, 374, 375, 376, 377, 378, 379, 470, 471,
+              472, 473, 474, 475, 476, 477, 478, 479, 570, 571, 572, 573, 574,
+              575, 576, 577, 578, 579, 670, 671, 672, 673, 674, 675, 676, 677,
+              678, 679, 770, 771, 772, 773, 774, 775, 776, 777, 778, 779, 870,
+              871, 872, 873, 874, 875, 876, 877, 878, 879, 970, 971, 972, 973,
+              974, 975, 976, 977, 978, 979, 1070, 1071, 1072, 1073, 1074, 1075,
+              1076, 1077, 1078, 1079, 1170, 1171, 1172, 1173, 1174, 1175, 1176,
+              1177, 1178, 1179, 1270, 1271, 1272, 1273, 1274, 1275, 1276, 1277,
+              1278, 1279, 1370, 1371, 1372, 1373, 1374, 1375, 1376, 1377, 1378,
+              1379, 1470, 1471, 1472, 1473, 1474, 1475, 1476, 1477, 1478, 1479,
+              1570, 1571, 1572, 1573, 1574, 1575, 1576, 1577, 1578, 1579, 1670,
+              1671, 1672, 1673, 1674, 1675, 1676, 1677, 1678, 1679, 1770, 1771,
+              1772, 1773, 1774, 1775, 1776, 1777, 1778, 1779, 1870, 1871, 1872,
+              1873, 1874, 1875, 1876, 1877,
+              1878, 1879, 1970, 1971, 1972, 1973, 1974, 1975, 1976, 1977, 1978,
+              1979});
+  }
+
+  @Test
+  public void testTerm2() throws Exception {
+    Query query = new TermQuery(new Term("field", "seventish"));
+    checkHits(query, new int[] {});
+  }
+
+  @Test
+  public void testPhrase() throws Exception {
+    PhraseQuery query = new PhraseQuery();
+    query.add(new Term("field", "seventy"));
+    query.add(new Term("field", "seven"));
+    checkHits(query, new int[]
+      {77, 177, 277, 377, 477, 577, 677, 777, 877,
+              977, 1077, 1177, 1277, 1377, 1477, 1577, 1677, 1777, 1877, 1977});
+  }
+
+  @Test
+  public void testPhrase2() throws Exception {
+    PhraseQuery query = new PhraseQuery();
+    query.add(new Term("field", "seventish"));
+    query.add(new Term("field", "sevenon"));
+    checkHits(query, new int[] {});
+  }
+
+  @Test
+  public void testBoolean() throws Exception {
+    BooleanQuery query = new BooleanQuery();
+    query.add(new TermQuery(new Term("field", "seventy")), BooleanClause.Occur.MUST);
+    query.add(new TermQuery(new Term("field", "seven")), BooleanClause.Occur.MUST);
+    checkHits(query, new int[]
+      {77, 177, 277, 377, 477, 577, 677, 770, 771, 772, 773, 774, 775, 776, 777,
+              778, 779, 877, 977, 1077, 1177, 1277, 1377, 1477, 1577, 1677,
+              1770, 1771, 1772, 1773, 1774, 1775, 1776, 1777, 1778, 1779, 1877,
+              1977});
+  }
+
+  @Test
+  public void testBoolean2() throws Exception {
+    BooleanQuery query = new BooleanQuery();
+    query.add(new TermQuery(new Term("field", "sevento")), BooleanClause.Occur.MUST);
+    query.add(new TermQuery(new Term("field", "sevenly")), BooleanClause.Occur.MUST);
+    checkHits(query, new int[] {});
+  }
+
+  @Test
+  public void testSpanNearExact() throws Exception {
+    SpanTermQuery term1 = new SpanTermQuery(new Term("field", "seventy"));
+    SpanTermQuery term2 = new SpanTermQuery(new Term("field", "seven"));
+    SpanNearQuery query = new SpanNearQuery(new SpanQuery[] {term1, term2},
+                                            0, true);
+    checkHits(query, new int[]
+      {77, 177, 277, 377, 477, 577, 677, 777, 877, 977, 1077, 1177, 1277, 1377, 1477, 1577, 1677, 1777, 1877, 1977});
+
+    assertTrue(searcher.explain(query, 77).getValue() > 0.0f);
+    assertTrue(searcher.explain(query, 977).getValue() > 0.0f);
+
+    QueryUtils.check(term1);
+    QueryUtils.check(term2);
+    QueryUtils.checkUnequal(term1,term2);
+  }
+  
+  @Test
+  public void testSpanTermQuery() throws Exception {
+    SpanTermQuery term1 = new SpanTermQuery(new Term("field", "seventy"));
+    checkHits(term1, new int[]
+                             { 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 170,
+        171, 172, 173, 174, 175, 176, 177, 178, 179, 270, 271, 272, 273, 274,
+        275, 276, 277, 278, 279, 370, 371, 372, 373, 374, 375, 376, 377, 378,
+        379, 470, 471, 472, 473, 474, 475, 476, 477, 478, 479, 570, 571, 572,
+        573, 574, 575, 576, 577, 578, 579, 670, 671, 672, 673, 674, 675, 676,
+        677, 678, 679, 770, 771, 772, 773, 774, 775, 776, 777, 778, 779, 870,
+        871, 872, 873, 874, 875, 876, 877, 878, 879, 970, 971, 972, 973, 974,
+        975, 976, 977, 978, 979, 1070, 1071, 1072, 1073, 1074, 1075, 1076,
+        1077, 1078, 1079, 1170, 1270, 1370, 1470, 1570, 1670, 1770, 1870, 1970,
+        1171, 1172, 1173, 1174, 1175, 1176, 1177, 1178, 1179, 1271, 1272, 1273,
+        1274, 1275, 1276, 1277, 1278, 1279, 1371, 1372, 1373, 1374, 1375, 1376,
+        1377, 1378, 1379, 1471, 1472, 1473, 1474, 1475, 1476, 1477, 1478, 1479,
+        1571, 1572, 1573, 1574, 1575, 1576, 1577, 1578, 1579, 1671, 1672, 1673,
+        1674, 1675, 1676, 1677, 1678, 1679, 1771, 1772, 1773, 1774, 1775, 1776,
+        1777, 1778, 1779, 1871, 1872, 1873, 1874, 1875, 1876, 1877, 1878, 1879,
+        1971, 1972, 1973, 1974, 1975, 1976, 1977, 1978, 1979 });
+  }
+
+  @Test
+  public void testSpanNearUnordered() throws Exception {
+    SpanTermQuery term1 = new SpanTermQuery(new Term("field", "nine"));
+    SpanTermQuery term2 = new SpanTermQuery(new Term("field", "six"));
+    SpanNearQuery query = new SpanNearQuery(new SpanQuery[] {term1, term2},
+                                            4, false);
+
+    checkHits(query, new int[]
+      {609, 629, 639, 649, 659, 669, 679, 689, 699, 906, 926, 936, 946, 956,
+              966, 976, 986, 996, 1609, 1629, 1639, 1649, 1659, 1669,
+              1679, 1689, 1699, 1906, 1926, 1936, 1946, 1956, 1966, 1976, 1986,
+              1996});
+  }
+
+  @Test
+  public void testSpanNearOrdered() throws Exception {
+    SpanTermQuery term1 = new SpanTermQuery(new Term("field", "nine"));
+    SpanTermQuery term2 = new SpanTermQuery(new Term("field", "six"));
+    SpanNearQuery query = new SpanNearQuery(new SpanQuery[] {term1, term2},
+                                            4, true);
+    checkHits(query, new int[]
+      {906, 926, 936, 946, 956, 966, 976, 986, 996, 1906, 1926, 1936, 1946, 1956, 1966, 1976, 1986, 1996});
+  }
+
+  @Test
+  public void testSpanNot() throws Exception {
+    SpanTermQuery term1 = new SpanTermQuery(new Term("field", "eight"));
+    SpanTermQuery term2 = new SpanTermQuery(new Term("field", "one"));
+    SpanNearQuery near = new SpanNearQuery(new SpanQuery[] {term1, term2},
+                                           4, true);
+    SpanTermQuery term3 = new SpanTermQuery(new Term("field", "forty"));
+    SpanNotQuery query = new SpanNotQuery(near, term3);
+
+    checkHits(query, new int[]
+      {801, 821, 831, 851, 861, 871, 881, 891, 1801, 1821, 1831, 1851, 1861, 1871, 1881, 1891});
+
+    assertTrue(searcher.explain(query, 801).getValue() > 0.0f);
+    assertTrue(searcher.explain(query, 891).getValue() > 0.0f);
+  }
+  
+  @Test
+  public void testSpanWithMultipleNotSingle() throws Exception {
+    SpanTermQuery term1 = new SpanTermQuery(new Term("field", "eight"));
+    SpanTermQuery term2 = new SpanTermQuery(new Term("field", "one"));
+    SpanNearQuery near = new SpanNearQuery(new SpanQuery[] {term1, term2},
+                                           4, true);
+    SpanTermQuery term3 = new SpanTermQuery(new Term("field", "forty"));
+
+    SpanOrQuery or = new SpanOrQuery(term3);
+
+    SpanNotQuery query = new SpanNotQuery(near, or);
+
+    checkHits(query, new int[]
+      {801, 821, 831, 851, 861, 871, 881, 891,
+              1801, 1821, 1831, 1851, 1861, 1871, 1881, 1891});
+
+    assertTrue(searcher.explain(query, 801).getValue() > 0.0f);
+    assertTrue(searcher.explain(query, 891).getValue() > 0.0f);
+  }
+
+  @Test
+  public void testSpanWithMultipleNotMany() throws Exception {
+    SpanTermQuery term1 = new SpanTermQuery(new Term("field", "eight"));
+    SpanTermQuery term2 = new SpanTermQuery(new Term("field", "one"));
+    SpanNearQuery near = new SpanNearQuery(new SpanQuery[] {term1, term2},
+                                           4, true);
+    SpanTermQuery term3 = new SpanTermQuery(new Term("field", "forty"));
+    SpanTermQuery term4 = new SpanTermQuery(new Term("field", "sixty"));
+    SpanTermQuery term5 = new SpanTermQuery(new Term("field", "eighty"));
+
+    SpanOrQuery or = new SpanOrQuery(term3, term4, term5);
+
+    SpanNotQuery query = new SpanNotQuery(near, or);
+
+    checkHits(query, new int[]
+      {801, 821, 831, 851, 871, 891, 1801, 1821, 1831, 1851, 1871, 1891});
+
+    assertTrue(searcher.explain(query, 801).getValue() > 0.0f);
+    assertTrue(searcher.explain(query, 891).getValue() > 0.0f);
+  }
+
+  @Test
+  public void testNpeInSpanNearWithSpanNot() throws Exception {
+    SpanTermQuery term1 = new SpanTermQuery(new Term("field", "eight"));
+    SpanTermQuery term2 = new SpanTermQuery(new Term("field", "one"));
+    SpanNearQuery near = new SpanNearQuery(new SpanQuery[] {term1, term2},
+                                           4, true);
+    SpanTermQuery hun = new SpanTermQuery(new Term("field", "hundred"));
+    SpanTermQuery term3 = new SpanTermQuery(new Term("field", "forty"));
+    SpanNearQuery exclude = new SpanNearQuery(new SpanQuery[] {hun, term3},
+                                              1, true);
+    
+    SpanNotQuery query = new SpanNotQuery(near, exclude);
+
+    checkHits(query, new int[]
+      {801, 821, 831, 851, 861, 871, 881, 891,
+              1801, 1821, 1831, 1851, 1861, 1871, 1881, 1891});
+
+    assertTrue(searcher.explain(query, 801).getValue() > 0.0f);
+    assertTrue(searcher.explain(query, 891).getValue() > 0.0f);
+  }
+
+  @Test
+  public void testNpeInSpanNearInSpanFirstInSpanNot() throws Exception {
+    int n = 5;
+    SpanTermQuery hun = new SpanTermQuery(new Term("field", "hundred"));
+    SpanTermQuery term40 = new SpanTermQuery(new Term("field", "forty"));
+    SpanTermQuery term40c = (SpanTermQuery)term40.clone();
+
+    SpanFirstQuery include = new SpanFirstQuery(term40, n);
+    SpanNearQuery near = new SpanNearQuery(new SpanQuery[]{hun, term40c},
+                                           n-1, true);
+    SpanFirstQuery exclude = new SpanFirstQuery(near, n-1);
+    SpanNotQuery q = new SpanNotQuery(include, exclude);
+    
+    checkHits(q, new int[]{40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 1040, 1041, 1042, 1043, 1044, 1045, 1046, 1047, 1048,
+            1049, 1140, 1141, 1142, 1143, 1144, 1145, 1146, 1147, 1148, 1149, 1240, 1241, 1242, 1243, 1244,
+            1245, 1246, 1247, 1248, 1249, 1340, 1341, 1342, 1343, 1344, 1345, 1346, 1347, 1348, 1349, 1440, 1441, 1442,
+            1443, 1444, 1445, 1446, 1447, 1448, 1449, 1540, 1541, 1542, 1543, 1544, 1545, 1546, 1547, 1548, 1549, 1640,
+            1641, 1642, 1643, 1644, 1645, 1646, 1647,
+            1648, 1649, 1740, 1741, 1742, 1743, 1744, 1745, 1746, 1747, 1748, 1749, 1840, 1841, 1842, 1843, 1844, 1845, 1846,
+            1847, 1848, 1849, 1940, 1941, 1942, 1943, 1944, 1945, 1946, 1947, 1948, 1949});
+  }
+  
+  @Test
+  public void testSpanFirst() throws Exception {
+    SpanTermQuery term1 = new SpanTermQuery(new Term("field", "five"));
+    SpanFirstQuery query = new SpanFirstQuery(term1, 1);
+
+    checkHits(query, new int[]
+      {5, 500, 501, 502, 503, 504, 505, 506, 507, 508, 509, 510, 511, 512, 513,
+       514, 515, 516, 517, 518, 519, 520, 521, 522, 523, 524, 525, 526, 527,
+       528, 529, 530, 531, 532, 533, 534, 535, 536, 537, 538, 539, 540, 541,
+       542, 543, 544, 545, 546, 547, 548, 549, 550, 551, 552, 553, 554, 555,
+       556, 557, 558, 559, 560, 561, 562, 563, 564, 565, 566, 567, 568, 569,
+       570, 571, 572, 573, 574, 575, 576, 577, 578, 579, 580, 581, 582, 583,
+       584, 585, 586, 587, 588, 589, 590, 591, 592, 593, 594, 595, 596, 597,
+       598, 599});
+
+    assertTrue(searcher.explain(query, 5).getValue() > 0.0f);
+    assertTrue(searcher.explain(query, 599).getValue() > 0.0f);
+
+  }
+
+  @Test
+  public void testSpanPositionRange() throws Exception {
+    SpanPositionRangeQuery query;
+    SpanTermQuery term1 = new SpanTermQuery(new Term("field", "five"));
+    query = new SpanPositionRangeQuery(term1, 1, 2);
+    checkHits(query, new int[]
+      {25,35, 45, 55, 65, 75, 85, 95});
+    assertTrue(searcher.explain(query, 25).getValue() > 0.0f);
+    assertTrue(searcher.explain(query, 95).getValue() > 0.0f);
+
+    query = new SpanPositionRangeQuery(term1, 0, 1);
+    checkHits(query, new int[]
+      {5, 500, 501, 502, 503, 504, 505, 506, 507, 508, 509, 510, 511, 512,
+              513, 514, 515, 516, 517, 518, 519, 520, 521, 522, 523, 524, 525,
+              526, 527, 528, 529, 530, 531, 532, 533, 534, 535, 536, 537, 538,
+              539, 540, 541, 542, 543, 544, 545, 546, 547, 548, 549, 550, 551,
+              552, 553, 554, 555, 556, 557, 558, 559, 560, 561, 562, 563, 564,
+              565, 566, 567, 568, 569, 570, 571, 572, 573, 574, 575, 576, 577,
+              578, 579, 580, 581, 582, 583, 584,
+              585, 586, 587, 588, 589, 590, 591, 592, 593, 594, 595, 596, 597,
+              598, 599});
+
+    query = new SpanPositionRangeQuery(term1, 6, 7);
+    checkHits(query, new int[]{});
+  }
+
+  @Test
+  public void testSpanPayloadCheck() throws Exception {
+    SpanTermQuery term1 = new SpanTermQuery(new Term("field", "five"));
+    Payload pay = new Payload(("pos: " + 5).getBytes());
+    SpanQuery query = new SpanPayloadCheckQuery(term1, Collections.singletonList(pay.getData()));
+    checkHits(query, new int[]
+      {1125, 1135, 1145, 1155, 1165, 1175, 1185, 1195, 1225, 1235, 1245, 1255, 1265, 1275, 1285, 1295, 1325, 1335, 1345, 1355, 1365, 1375, 1385, 1395, 1425, 1435, 1445, 1455, 1465, 1475, 1485, 1495, 1525, 1535, 1545, 1555, 1565, 1575, 1585, 1595, 1625, 1635, 1645, 1655, 1665, 1675, 1685, 1695, 1725, 1735, 1745, 1755, 1765, 1775, 1785, 1795, 1825, 1835, 1845, 1855, 1865, 1875, 1885, 1895, 1925, 1935, 1945, 1955, 1965, 1975, 1985, 1995});
+    assertTrue(searcher.explain(query, 1125).getValue() > 0.0f);
+
+    SpanTermQuery term2 = new SpanTermQuery(new Term("field", "hundred"));
+    SpanNearQuery snq;
+    SpanQuery[] clauses;
+    List<byte[]> list;
+    Payload pay2;
+    clauses = new SpanQuery[2];
+    clauses[0] = term1;
+    clauses[1] = term2;
+    snq = new SpanNearQuery(clauses, 0, true);
+    pay = new Payload(("pos: " + 0).getBytes());
+    pay2 = new Payload(("pos: " + 1).getBytes());
+    list = new ArrayList<byte[]>();
+    list.add(pay.getData());
+    list.add(pay2.getData());
+    query = new SpanNearPayloadCheckQuery(snq, list);
+    checkHits(query, new int[]
+      {500, 501, 502, 503, 504, 505, 506, 507, 508, 509, 510, 511, 512, 513, 514, 515, 516, 517, 518, 519, 520, 521, 522, 523, 524, 525, 526, 527, 528, 529, 530, 531, 532, 533, 534, 535, 536, 537, 538, 539, 540, 541, 542, 543, 544, 545, 546, 547, 548, 549, 550, 551, 552, 553, 554, 555, 556, 557, 558, 559, 560, 561, 562, 563, 564, 565, 566, 567, 568, 569, 570, 571, 572, 573, 574, 575, 576, 577, 578, 579, 580, 581, 582, 583, 584, 585, 586, 587, 588, 589, 590, 591, 592, 593, 594, 595, 596, 597, 598, 599});
+    clauses = new SpanQuery[3];
+    clauses[0] = term1;
+    clauses[1] = term2;
+    clauses[2] = new SpanTermQuery(new Term("field", "five"));
+    snq = new SpanNearQuery(clauses, 0, true);
+    pay = new Payload(("pos: " + 0).getBytes());
+    pay2 = new Payload(("pos: " + 1).getBytes());
+    Payload pay3 = new Payload(("pos: " + 2).getBytes());
+    list = new ArrayList<byte[]>();
+    list.add(pay.getData());
+    list.add(pay2.getData());
+    list.add(pay3.getData());
+    query = new SpanNearPayloadCheckQuery(snq, list);
+    checkHits(query, new int[]
+      {505});
+  }
+
+  @Test
+  public void testComplexSpanChecks() throws Exception {
+    SpanTermQuery one = new SpanTermQuery(new Term("field", "one"));
+    SpanTermQuery thous = new SpanTermQuery(new Term("field", "thousand"));
+    //should be one position in between
+    SpanTermQuery hundred = new SpanTermQuery(new Term("field", "hundred"));
+    SpanTermQuery three = new SpanTermQuery(new Term("field", "three"));
+
+    SpanNearQuery oneThous = new SpanNearQuery(new SpanQuery[]{one, thous}, 0, true);
+    SpanNearQuery hundredThree = new SpanNearQuery(new SpanQuery[]{hundred, three}, 0, true);
+    SpanNearQuery oneThousHunThree = new SpanNearQuery(new SpanQuery[]{oneThous, hundredThree}, 1, true);
+    SpanQuery query;
+    //this one's too small
+    query = new SpanPositionRangeQuery(oneThousHunThree, 1, 2);
+    checkHits(query, new int[]{});
+    //this one's just right
+    query = new SpanPositionRangeQuery(oneThousHunThree, 0, 6);
+    checkHits(query, new int[]{1103, 1203,1303,1403,1503,1603,1703,1803,1903});
+
+    Collection<byte[]> payloads = new ArrayList<byte[]>();
+    Payload pay = new Payload(("pos: " + 0).getBytes());
+    Payload pay2 = new Payload(("pos: " + 1).getBytes());
+    Payload pay3 = new Payload(("pos: " + 3).getBytes());
+    Payload pay4 = new Payload(("pos: " + 4).getBytes());
+    payloads.add(pay.getData());
+    payloads.add(pay2.getData());
+    payloads.add(pay3.getData());
+    payloads.add(pay4.getData());
+    query = new SpanNearPayloadCheckQuery(oneThousHunThree, payloads);
+    checkHits(query, new int[]{1103, 1203,1303,1403,1503,1603,1703,1803,1903});
+
+  }
+
+
+  @Test
+  public void testSpanOr() throws Exception {
+    SpanTermQuery term1 = new SpanTermQuery(new Term("field", "thirty"));
+    SpanTermQuery term2 = new SpanTermQuery(new Term("field", "three"));
+    SpanNearQuery near1 = new SpanNearQuery(new SpanQuery[] {term1, term2},
+                                            0, true);
+    SpanTermQuery term3 = new SpanTermQuery(new Term("field", "forty"));
+    SpanTermQuery term4 = new SpanTermQuery(new Term("field", "seven"));
+    SpanNearQuery near2 = new SpanNearQuery(new SpanQuery[] {term3, term4},
+                                            0, true);
+
+    SpanOrQuery query = new SpanOrQuery(near1, near2);
+
+    checkHits(query, new int[]
+      {33, 47, 133, 147, 233, 247, 333, 347, 433, 447, 533, 547, 633, 647, 733,
+              747, 833, 847, 933, 947, 1033, 1047, 1133, 1147, 1233, 1247, 1333,
+              1347, 1433, 1447, 1533, 1547, 1633, 1647, 1733, 1747, 1833, 1847, 1933, 1947});
+
+    assertTrue(searcher.explain(query, 33).getValue() > 0.0f);
+    assertTrue(searcher.explain(query, 947).getValue() > 0.0f);
+  }
+
+  @Test
+  public void testSpanExactNested() throws Exception {
+    SpanTermQuery term1 = new SpanTermQuery(new Term("field", "three"));
+    SpanTermQuery term2 = new SpanTermQuery(new Term("field", "hundred"));
+    SpanNearQuery near1 = new SpanNearQuery(new SpanQuery[] {term1, term2},
+                                            0, true);
+    SpanTermQuery term3 = new SpanTermQuery(new Term("field", "thirty"));
+    SpanTermQuery term4 = new SpanTermQuery(new Term("field", "three"));
+    SpanNearQuery near2 = new SpanNearQuery(new SpanQuery[] {term3, term4},
+                                            0, true);
+
+    SpanNearQuery query = new SpanNearQuery(new SpanQuery[] {near1, near2},
+                                            0, true);
+
+    checkHits(query, new int[] {333, 1333});
+
+    assertTrue(searcher.explain(query, 333).getValue() > 0.0f);
+  }
+
+  @Test
+  public void testSpanNearOr() throws Exception {
+
+    SpanTermQuery t1 = new SpanTermQuery(new Term("field","six"));
+    SpanTermQuery t3 = new SpanTermQuery(new Term("field","seven"));
+    
+    SpanTermQuery t5 = new SpanTermQuery(new Term("field","seven"));
+    SpanTermQuery t6 = new SpanTermQuery(new Term("field","six"));
+
+    SpanOrQuery to1 = new SpanOrQuery(t1, t3);
+    SpanOrQuery to2 = new SpanOrQuery(t5, t6);
+    
+    SpanNearQuery query = new SpanNearQuery(new SpanQuery[] {to1, to2},
+                                            10, true);
+
+    checkHits(query, new int[]
+      {606, 607, 626, 627, 636, 637, 646, 647, 656, 657, 666, 667, 676, 677,
+              686, 687, 696, 697, 706, 707, 726, 727, 736, 737, 746, 747, 756,
+              757, 766, 767, 776, 777, 786, 787, 796, 797, 1606, 1607, 1626,
+              1627, 1636, 1637, 1646, 1647, 1656, 1657, 1666, 1667, 1676, 1677,
+              1686, 1687, 1696, 1697, 1706, 1707, 1726, 1727, 1736, 1737,
+              1746, 1747, 1756, 1757, 1766, 1767, 1776, 1777, 1786, 1787, 1796,
+              1797});
+  }
+
+  @Test
+  public void testSpanComplex1() throws Exception {
+      
+    SpanTermQuery t1 = new SpanTermQuery(new Term("field","six"));
+    SpanTermQuery t2 = new SpanTermQuery(new Term("field","hundred"));
+    SpanNearQuery tt1 = new SpanNearQuery(new SpanQuery[] {t1, t2}, 0,true);
+
+    SpanTermQuery t3 = new SpanTermQuery(new Term("field","seven"));
+    SpanTermQuery t4 = new SpanTermQuery(new Term("field","hundred"));
+    SpanNearQuery tt2 = new SpanNearQuery(new SpanQuery[] {t3, t4}, 0,true);
+    
+    SpanTermQuery t5 = new SpanTermQuery(new Term("field","seven"));
+    SpanTermQuery t6 = new SpanTermQuery(new Term("field","six"));
+
+    SpanOrQuery to1 = new SpanOrQuery(tt1, tt2);
+    SpanOrQuery to2 = new SpanOrQuery(t5, t6);
+    
+    SpanNearQuery query = new SpanNearQuery(new SpanQuery[] {to1, to2},
+                                            100, true);
+    
+    checkHits(query, new int[]
+      {606, 607, 626, 627, 636, 637, 646, 647, 656, 657, 666, 667, 676, 677, 686, 687, 696,
+              697, 706, 707, 726, 727, 736, 737, 746, 747, 756, 757,
+              766, 767, 776, 777, 786, 787, 796, 797, 1606, 1607, 1626, 1627, 1636, 1637, 1646,
+              1647, 1656, 1657,
+              1666, 1667, 1676, 1677, 1686, 1687, 1696, 1697, 1706, 1707, 1726, 1727, 1736, 1737,
+              1746, 1747, 1756, 1757, 1766, 1767, 1776, 1777, 1786, 1787, 1796, 1797});
+  }
+  
+  @Test
+  public void testSpansSkipTo() throws Exception {
+    SpanTermQuery t1 = new SpanTermQuery(new Term("field", "seventy"));
+    SpanTermQuery t2 = new SpanTermQuery(new Term("field", "seventy"));
+    Spans s1 = t1.getSpans(searcher.getIndexReader());
+    Spans s2 = t2.getSpans(searcher.getIndexReader());
+
+    assertTrue(s1.next());
+    assertTrue(s2.next());
+
+    boolean hasMore = true;
+
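+    // Drive the javadoc reference implementation on s1 and the real skipTo on s2 in
+    // lockstep; both should land on the same documents until they are exhausted.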
+    do {
+      hasMore = skipToAccordingToJavaDocs(s1, s1.doc());
+      assertEquals(hasMore, s2.skipTo(s2.doc()));
+      assertEquals(s1.doc(), s2.doc());
+    } while (hasMore);
+  }
+
+  /** Skips to the first match beyond the current, whose document number is
+   * greater than or equal to <i>target</i>. <p>Returns true iff there is such
+   * a match.  <p>Behaves as if written: <pre>
+   *   boolean skipTo(int target) {
+   *     do {
+   *       if (!next())
+   *         return false;
+   *     } while (target > doc());
+   *     return true;
+   *   }
+   * </pre>
+   */
+  private boolean skipToAccordingToJavaDocs(Spans s, int target)
+      throws Exception {
+    do {
+      if (!s.next())
+        return false;
+    } while (target > s.doc());
+    return true;
+
+  }
+
+  private void checkHits(Query query, int[] results) throws IOException {
+    CheckHits.checkHits(random, query, "field", searcher, results);
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/spans/TestFieldMaskingSpanQuery.java b/lucene/backwards/src/test/org/apache/lucene/search/spans/TestFieldMaskingSpanQuery.java
new file mode 100644
index 0000000..4bae375
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/spans/TestFieldMaskingSpanQuery.java
@@ -0,0 +1,352 @@
+package org.apache.lucene.search.spans;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.HashSet;
+import java.util.Set;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.CheckHits;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.QueryUtils;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+
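+/**
+ * Tests for FieldMaskingSpanQuery, which wraps a SpanQuery on one field but reports a
+ * different ("masked") field name, so span near/or queries can combine clauses across
+ * fields whose tokens share position information. A minimal sketch, mirroring
+ * testSimple1 below:
+ * <pre>
+ *   SpanQuery first = new SpanTermQuery(new Term("first", "james"));
+ *   SpanQuery last  = new FieldMaskingSpanQuery(new SpanTermQuery(new Term("last", "jones")), "first");
+ *   SpanQuery q     = new SpanNearQuery(new SpanQuery[] { first, last }, -1, false);
+ * </pre>
+ */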
+public class TestFieldMaskingSpanQuery extends LuceneTestCase {
+
+  protected static Document doc(Field[] fields) {
+    Document doc = new Document();
+    for (int i = 0; i < fields.length; i++) {
+      doc.add(fields[i]);
+    }
+    return doc;
+  }
+  
+  protected static Field field(String name, String value) {
+    return newField(name, value, Field.Store.NO, Field.Index.ANALYZED);
+  }
+
+  protected static IndexSearcher searcher;
+  protected static Directory directory;
+  protected static IndexReader reader;
+  
+  @BeforeClass
+  public static void beforeClass() throws Exception {
+    directory = newDirectory();
+    RandomIndexWriter writer= new RandomIndexWriter(random, directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
+    
+    writer.addDocument(doc(new Field[] { field("id", "0"),
+                                         field("gender", "male"),
+                                         field("first",  "james"),
+                                         field("last",   "jones")     }));
+
+    writer.addDocument(doc(new Field[] { field("id", "1"),
+                                         field("gender", "male"),
+                                         field("first",  "james"),
+                                         field("last",   "smith"),
+                                         field("gender", "female"),
+                                         field("first",  "sally"),
+                                         field("last",   "jones")     }));
+
+    writer.addDocument(doc(new Field[] { field("id", "2"),
+                                         field("gender", "female"),
+                                         field("first",  "greta"),
+                                         field("last",   "jones"),
+                                         field("gender", "female"),
+                                         field("first",  "sally"),
+                                         field("last",   "smith"),
+                                         field("gender", "male"),
+                                         field("first",  "james"),
+                                         field("last",   "jones")     }));
+
+    writer.addDocument(doc(new Field[] { field("id", "3"),
+                                         field("gender", "female"),
+                                         field("first",  "lisa"),
+                                         field("last",   "jones"),
+                                         field("gender", "male"),
+                                         field("first",  "bob"),
+                                         field("last",   "costas")     }));
+
+    writer.addDocument(doc(new Field[] { field("id", "4"),
+                                         field("gender", "female"),
+                                         field("first",  "sally"),
+                                         field("last",   "smith"),
+                                         field("gender", "female"),
+                                         field("first",  "linda"),
+                                         field("last",   "dixit"),
+                                         field("gender", "male"),
+                                         field("first",  "bubba"),
+                                         field("last",   "jones")     }));
+    reader = writer.getReader();
+    writer.close();
+    searcher = newSearcher(reader);
+  }
+
+  @AfterClass
+  public static void afterClass() throws Exception {
+    searcher.close();
+    searcher = null;
+    reader.close();
+    reader = null;
+    directory.close();
+    directory = null;
+  }
+
+  protected void check(SpanQuery q, int[] docs) throws Exception {
+    CheckHits.checkHitCollector(random, q, null, searcher, docs);
+  }
+
+  public void testRewrite0() throws Exception {
+    SpanQuery q = new FieldMaskingSpanQuery
+      (new SpanTermQuery(new Term("last", "sally")) , "first");
+    q.setBoost(8.7654321f);
+    SpanQuery qr = (SpanQuery) searcher.rewrite(q);
+
+    QueryUtils.checkEqual(q, qr);
+
+    Set<Term> terms = new HashSet<Term>();
+    qr.extractTerms(terms);
+    assertEquals(1, terms.size());
+  }
+  
+  public void testRewrite1() throws Exception {
+    // mask an anon SpanQuery class that rewrites to something else.
+    SpanQuery q = new FieldMaskingSpanQuery
+      (new SpanTermQuery(new Term("last", "sally")) {
+          @Override
+          public Query rewrite(IndexReader reader) {
+            return new SpanOrQuery(new SpanQuery[] {
+              new SpanTermQuery(new Term("first", "sally")),
+              new SpanTermQuery(new Term("first", "james")) });
+          }
+        }, "first");
+
+    SpanQuery qr = (SpanQuery) searcher.rewrite(q);
+
+    QueryUtils.checkUnequal(q, qr);
+
+    Set<Term> terms = new HashSet<Term>();
+    qr.extractTerms(terms);
+    assertEquals(2, terms.size());
+  }
+  
+  public void testRewrite2() throws Exception {
+    SpanQuery q1 = new SpanTermQuery(new Term("last", "smith"));
+    SpanQuery q2 = new SpanTermQuery(new Term("last", "jones"));
+    SpanQuery q = new SpanNearQuery(new SpanQuery[]
+      { q1, new FieldMaskingSpanQuery(q2, "last")}, 1, true );
+    Query qr = searcher.rewrite(q);
+
+    QueryUtils.checkEqual(q, qr);
+
+    HashSet<Term> set = new HashSet<Term>();
+    qr.extractTerms(set);
+    assertEquals(2, set.size());
+  }
+  
+  public void testEquality1() {
+    SpanQuery q1 = new FieldMaskingSpanQuery
+      (new SpanTermQuery(new Term("last", "sally")) , "first");
+    SpanQuery q2 = new FieldMaskingSpanQuery
+      (new SpanTermQuery(new Term("last", "sally")) , "first");
+    SpanQuery q3 = new FieldMaskingSpanQuery
+      (new SpanTermQuery(new Term("last", "sally")) , "XXXXX");
+    SpanQuery q4 = new FieldMaskingSpanQuery
+      (new SpanTermQuery(new Term("last", "XXXXX")) , "first");
+    SpanQuery q5 = new FieldMaskingSpanQuery
+      (new SpanTermQuery(new Term("xXXX", "sally")) , "first");
+    QueryUtils.checkEqual(q1, q2);
+    QueryUtils.checkUnequal(q1, q3);
+    QueryUtils.checkUnequal(q1, q4);
+    QueryUtils.checkUnequal(q1, q5);
+    
+    SpanQuery qA = new FieldMaskingSpanQuery
+      (new SpanTermQuery(new Term("last", "sally")) , "first");
+    qA.setBoost(9f);
+    SpanQuery qB = new FieldMaskingSpanQuery
+      (new SpanTermQuery(new Term("last", "sally")) , "first");
+    QueryUtils.checkUnequal(qA, qB);
+    qB.setBoost(9f);
+    QueryUtils.checkEqual(qA, qB);
+    
+  }
+  
+  public void testNoop0() throws Exception {
+    SpanQuery q1 = new SpanTermQuery(new Term("last", "sally"));
+    SpanQuery q = new FieldMaskingSpanQuery(q1, "first");
+    check(q, new int[] { /* :EMPTY: */ });
+  }
+  public void testNoop1() throws Exception {
+    SpanQuery q1 = new SpanTermQuery(new Term("last", "smith"));
+    SpanQuery q2 = new SpanTermQuery(new Term("last", "jones"));
+    SpanQuery q = new SpanNearQuery(new SpanQuery[]
+      { q1, new FieldMaskingSpanQuery(q2, "last")}, 0, true );
+    check(q, new int[] { 1, 2 });
+    q = new SpanNearQuery(new SpanQuery[]
+      { new FieldMaskingSpanQuery(q1, "last"),
+        new FieldMaskingSpanQuery(q2, "last")}, 0, true );
+    check(q, new int[] { 1, 2 });
+  }
+  
+  public void testSimple1() throws Exception {
+    SpanQuery q1 = new SpanTermQuery(new Term("first", "james"));
+    SpanQuery q2 = new SpanTermQuery(new Term("last", "jones"));
+    SpanQuery q = new SpanNearQuery(new SpanQuery[]
+      { q1, new FieldMaskingSpanQuery(q2, "first")}, -1, false );
+    check(q, new int[] { 0, 2 });
+    q = new SpanNearQuery(new SpanQuery[]
+      { new FieldMaskingSpanQuery(q2, "first"), q1}, -1, false );
+    check(q, new int[] { 0, 2 });
+    q = new SpanNearQuery(new SpanQuery[]
+      { q2, new FieldMaskingSpanQuery(q1, "last")}, -1, false );
+    check(q, new int[] { 0, 2 });
+    q = new SpanNearQuery(new SpanQuery[]
+      { new FieldMaskingSpanQuery(q1, "last"), q2}, -1, false );
+    check(q, new int[] { 0, 2 });
+
+  }
+  
+  public void testSimple2() throws Exception {
+    SpanQuery q1 = new SpanTermQuery(new Term("gender", "female"));
+    SpanQuery q2 = new SpanTermQuery(new Term("last", "smith"));
+    SpanQuery q = new SpanNearQuery(new SpanQuery[]
+      { q1, new FieldMaskingSpanQuery(q2, "gender")}, -1, false );
+    check(q, new int[] { 2, 4 });
+    q = new SpanNearQuery(new SpanQuery[]
+      { new FieldMaskingSpanQuery(q1, "id"),
+        new FieldMaskingSpanQuery(q2, "id") }, -1, false );
+    check(q, new int[] { 2, 4 });
+  }
+
+  public void testSpans0() throws Exception {
+    SpanQuery q1 = new SpanTermQuery(new Term("gender", "female"));
+    SpanQuery q2 = new SpanTermQuery(new Term("first",  "james"));
+    SpanQuery q  = new SpanOrQuery(new SpanQuery[]
+      { q1, new FieldMaskingSpanQuery(q2, "gender")});
+    check(q, new int[] { 0, 1, 2, 3, 4 });
+  
+    Spans span = q.getSpans(searcher.getIndexReader());
+    
+    assertEquals(true, span.next());
+    assertEquals(s(0,0,1), s(span));
+
+    assertEquals(true, span.next());
+    assertEquals(s(1,0,1), s(span));
+
+    assertEquals(true, span.next());
+    assertEquals(s(1,1,2), s(span));
+
+    assertEquals(true, span.next());
+    assertEquals(s(2,0,1), s(span));
+
+    assertEquals(true, span.next());
+    assertEquals(s(2,1,2), s(span));
+
+    assertEquals(true, span.next());
+    assertEquals(s(2,2,3), s(span));
+
+    assertEquals(true, span.next());
+    assertEquals(s(3,0,1), s(span));
+
+    assertEquals(true, span.next());
+    assertEquals(s(4,0,1), s(span));
+
+    assertEquals(true, span.next());
+    assertEquals(s(4,1,2), s(span));
+
+    assertEquals(false, span.next());
+  }
+  
+  public void testSpans1() throws Exception {
+    SpanQuery q1 = new SpanTermQuery(new Term("first", "sally"));
+    SpanQuery q2 = new SpanTermQuery(new Term("first", "james"));
+    SpanQuery qA = new SpanOrQuery(new SpanQuery[] { q1, q2 });
+    SpanQuery qB = new FieldMaskingSpanQuery(qA, "id");
+                                            
+    check(qA, new int[] { 0, 1, 2, 4 });
+    check(qB, new int[] { 0, 1, 2, 4 });
+  
+    Spans spanA = qA.getSpans(searcher.getIndexReader());
+    Spans spanB = qB.getSpans(searcher.getIndexReader());
+    
+    while (spanA.next()) {
+      assertTrue("spanB not still going", spanB.next());
+      assertEquals("spanA not equal spanB", s(spanA), s(spanB));
+    }
+    assertTrue("spanB still going even tough spanA is done", !(spanB.next()));
+
+  }
+  
+  public void testSpans2() throws Exception {
+    SpanQuery qA1 = new SpanTermQuery(new Term("gender", "female"));
+    SpanQuery qA2 = new SpanTermQuery(new Term("first",  "james"));
+    SpanQuery qA  = new SpanOrQuery(new SpanQuery[]
+      { qA1, new FieldMaskingSpanQuery(qA2, "gender")});
+    SpanQuery qB  = new SpanTermQuery(new Term("last",   "jones"));
+    SpanQuery q   = new SpanNearQuery(new SpanQuery[]
+      { new FieldMaskingSpanQuery(qA, "id"),
+        new FieldMaskingSpanQuery(qB, "id") }, -1, false );
+    check(q, new int[] { 0, 1, 2, 3 });
+  
+    Spans span = q.getSpans(searcher.getIndexReader());
+    
+    assertEquals(true, span.next());
+    assertEquals(s(0,0,1), s(span));
+
+    assertEquals(true, span.next());
+    assertEquals(s(1,1,2), s(span));
+
+    assertEquals(true, span.next());
+    assertEquals(s(2,0,1), s(span));
+
+    assertEquals(true, span.next());
+    assertEquals(s(2,2,3), s(span));
+
+    assertEquals(true, span.next());
+    assertEquals(s(3,0,1), s(span));
+
+    assertEquals(false, span.next());
+  }
+  
+  public String s(Spans span) {
+    return s(span.doc(), span.start(), span.end());
+  }
+  public String s(int doc, int start, int end) {
+    return "s(" + doc + "," + start + "," + end +")";
+  }
+  
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java b/lucene/backwards/src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java
new file mode 100644
index 0000000..d3fe2db
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java
@@ -0,0 +1,187 @@
+package org.apache.lucene.search.spans;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.analysis.MockAnalyzer;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.queryParser.QueryParser;
+import org.apache.lucene.search.CheckHits;
+import org.apache.lucene.search.Explanation;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Weight;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+
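+/** Tests ordered NearSpans behaviour: next()/skipTo() interaction and scorer/explanation edge cases. */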
+public class TestNearSpansOrdered extends LuceneTestCase {
+  protected IndexSearcher searcher;
+  protected Directory directory;
+  protected IndexReader reader;
+
+  public static final String FIELD = "field";
+  public static final QueryParser qp =
+    new QueryParser(TEST_VERSION_CURRENT, FIELD, new MockAnalyzer(random));
+
+  @Override
+  public void tearDown() throws Exception {
+    searcher.close();
+    reader.close();
+    directory.close();
+    super.tearDown();
+  }
+  
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    directory = newDirectory();
+    RandomIndexWriter writer= new RandomIndexWriter(random, directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
+    for (int i = 0; i < docFields.length; i++) {
+      Document doc = new Document();
+      doc.add(newField(FIELD, docFields[i], Field.Store.NO, Field.Index.ANALYZED));
+      writer.addDocument(doc);
+    }
+    reader = writer.getReader();
+    writer.close();
+    searcher = newSearcher(reader);
+  }
+
+  protected String[] docFields = {
+    "w1 w2 w3 w4 w5",
+    "w1 w3 w2 w3 zz",
+    "w1 xx w2 yy w3",
+    "w1 w3 xx w2 yy w3 zz"
+  };
+
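+  /** Builds a SpanNearQuery over three FIELD terms with the given slop and order requirement. */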
+  protected SpanNearQuery makeQuery(String s1, String s2, String s3,
+                                    int slop, boolean inOrder) {
+    return new SpanNearQuery
+      (new SpanQuery[] {
+        new SpanTermQuery(new Term(FIELD, s1)),
+        new SpanTermQuery(new Term(FIELD, s2)),
+        new SpanTermQuery(new Term(FIELD, s3)) },
+       slop,
+       inOrder);
+  }
+  protected SpanNearQuery makeQuery() {
+    return makeQuery("w1","w2","w3",1,true);
+  }
+  
+  public void testSpanNearQuery() throws Exception {
+    SpanNearQuery q = makeQuery();
+    CheckHits.checkHits(random, q, FIELD, searcher, new int[] {0,1});
+  }
+
+  public String s(Spans span) {
+    return s(span.doc(), span.start(), span.end());
+  }
+  public String s(int doc, int start, int end) {
+    return "s(" + doc + "," + start + "," + end +")";
+  }
+  
+  public void testNearSpansNext() throws Exception {
+    SpanNearQuery q = makeQuery();
+    Spans span = q.getSpans(searcher.getIndexReader());
+    assertEquals(true, span.next());
+    assertEquals(s(0,0,3), s(span));
+    assertEquals(true, span.next());
+    assertEquals(s(1,0,4), s(span));
+    assertEquals(false, span.next());
+  }
+
+  /**
+   * test does not imply that skipTo(doc+1) should work exactly the
+   * same as next -- it's only applicable in this case since we know doc
+   * does not contain more than one span
+   */
+  public void testNearSpansSkipToLikeNext() throws Exception {
+    SpanNearQuery q = makeQuery();
+    Spans span = q.getSpans(searcher.getIndexReader());
+    assertEquals(true, span.skipTo(0));
+    assertEquals(s(0,0,3), s(span));
+    assertEquals(true, span.skipTo(1));
+    assertEquals(s(1,0,4), s(span));
+    assertEquals(false, span.skipTo(2));
+  }
+  
+  public void testNearSpansNextThenSkipTo() throws Exception {
+    SpanNearQuery q = makeQuery();
+    Spans span = q.getSpans(searcher.getIndexReader());
+    assertEquals(true, span.next());
+    assertEquals(s(0,0,3), s(span));
+    assertEquals(true, span.skipTo(1));
+    assertEquals(s(1,0,4), s(span));
+    assertEquals(false, span.next());
+  }
+  
+  public void testNearSpansNextThenSkipPast() throws Exception {
+    SpanNearQuery q = makeQuery();
+    Spans span = q.getSpans(searcher.getIndexReader());
+    assertEquals(true, span.next());
+    assertEquals(s(0,0,3), s(span));
+    assertEquals(false, span.skipTo(2));
+  }
+  
+  public void testNearSpansSkipPast() throws Exception {
+    SpanNearQuery q = makeQuery();
+    Spans span = q.getSpans(searcher.getIndexReader());
+    assertEquals(false, span.skipTo(2));
+  }
+  
+  public void testNearSpansSkipTo0() throws Exception {
+    SpanNearQuery q = makeQuery();
+    Spans span = q.getSpans(searcher.getIndexReader());
+    assertEquals(true, span.skipTo(0));
+    assertEquals(s(0,0,3), s(span));
+  }
+
+  public void testNearSpansSkipTo1() throws Exception {
+    SpanNearQuery q = makeQuery();
+    Spans span = q.getSpans(searcher.getIndexReader());
+    assertEquals(true, span.skipTo(1));
+    assertEquals(s(1,0,4), s(span));
+  }
+
+  /**
+   * not a direct test of NearSpans, but a demonstration of how/when
+   * this causes problems
+   */
+  public void testSpanNearScorerSkipTo1() throws Exception {
+    SpanNearQuery q = makeQuery();
+    Weight w = searcher.createNormalizedWeight(q);
+    Scorer s = w.scorer(searcher.getIndexReader(), true, false);
+    assertEquals(1, s.advance(1));
+  }
+  
+  /**
+   * not a direct test of NearSpans, but a demonstration of how/when
+   * this causes problems
+   */
+  public void testSpanNearScorerExplain() throws Exception {
+    SpanNearQuery q = makeQuery();
+    Explanation e = searcher.explain(q, 1);
+    assertTrue("Scorer explanation value for doc#1 isn't positive: "
+               + e.toString(),
+               0.0f < e.getValue());
+  }
+  
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/spans/TestPayloadSpans.java b/lucene/backwards/src/test/org/apache/lucene/search/spans/TestPayloadSpans.java
new file mode 100644
index 0000000..d2793e6
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/spans/TestPayloadSpans.java
@@ -0,0 +1,543 @@
+package org.apache.lucene.search.spans;
+
+/**
+ * Copyright 2004 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.io.Reader;
+import java.io.StringReader;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.Set;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.LowerCaseTokenizer;
+import org.apache.lucene.analysis.TokenFilter;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
+import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.CorruptIndexException;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.Payload;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.DefaultSimilarity;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Similarity;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.search.payloads.PayloadHelper;
+import org.apache.lucene.search.payloads.PayloadSpanUtil;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.LockObtainFailedException;
+import org.apache.lucene.util.LuceneTestCase;
+
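+/**
+ * Tests that Spans returned by span queries expose the expected payloads via
+ * isPayloadAvailable() and getPayload(), including for nested and unordered queries.
+ */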
+public class TestPayloadSpans extends LuceneTestCase {
+  private IndexSearcher searcher;
+  private Similarity similarity = new DefaultSimilarity();
+  protected IndexReader indexReader;
+  private IndexReader closeIndexReader;
+  private Directory directory;
+
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    PayloadHelper helper = new PayloadHelper();
+    searcher = helper.setUp(random, similarity, 1000);
+    indexReader = searcher.getIndexReader();
+  }
+
+  public void testSpanTermQuery() throws Exception {
+    SpanTermQuery stq;
+    Spans spans;
+    stq = new SpanTermQuery(new Term(PayloadHelper.FIELD, "seventy"));
+    spans = stq.getSpans(indexReader);
+    assertTrue("spans is null and it shouldn't be", spans != null);
+    checkSpans(spans, 100, 1, 1, 1);
+
+    stq = new SpanTermQuery(new Term(PayloadHelper.NO_PAYLOAD_FIELD, "seventy"));  
+    spans = stq.getSpans(indexReader);
+    assertTrue("spans is null and it shouldn't be", spans != null);
+    checkSpans(spans, 100, 0, 0, 0);
+  }
+
+  public void testSpanFirst() throws IOException {
+
+    SpanQuery match;
+    SpanFirstQuery sfq;
+    match = new SpanTermQuery(new Term(PayloadHelper.FIELD, "one"));
+    sfq = new SpanFirstQuery(match, 2);
+    Spans spans = sfq.getSpans(indexReader);
+    checkSpans(spans, 109, 1, 1, 1);
+    //Test more complicated subclause
+    SpanQuery[] clauses = new SpanQuery[2];
+    clauses[0] = new SpanTermQuery(new Term(PayloadHelper.FIELD, "one"));
+    clauses[1] = new SpanTermQuery(new Term(PayloadHelper.FIELD, "hundred"));
+    match = new SpanNearQuery(clauses, 0, true);
+    sfq = new SpanFirstQuery(match, 2);
+    checkSpans(sfq.getSpans(indexReader), 100, 2, 1, 1);
+
+    match = new SpanNearQuery(clauses, 0, false);
+    sfq = new SpanFirstQuery(match, 2);
+    checkSpans(sfq.getSpans(indexReader), 100, 2, 1, 1);
+    
+  }
+  
+  public void testSpanNot() throws Exception {
+    SpanQuery[] clauses = new SpanQuery[2];
+    clauses[0] = new SpanTermQuery(new Term(PayloadHelper.FIELD, "one"));
+    clauses[1] = new SpanTermQuery(new Term(PayloadHelper.FIELD, "three"));
+    SpanQuery spq = new SpanNearQuery(clauses, 5, true);
+    SpanNotQuery snq = new SpanNotQuery(spq, new SpanTermQuery(new Term(PayloadHelper.FIELD, "two")));
+
+
+
+    Directory directory = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random, directory,
+                                                     newIndexWriterConfig(TEST_VERSION_CURRENT, new PayloadAnalyzer()).setSimilarity(similarity));
+
+    Document doc = new Document();
+    doc.add(newField(PayloadHelper.FIELD, "one two three one four three",
+        Field.Store.YES, Field.Index.ANALYZED));
+    writer.addDocument(doc);
+    IndexReader reader = writer.getReader();
+    writer.close();
+
+    checkSpans(snq.getSpans(reader), 1,new int[]{2});
+    reader.close();
+    directory.close();
+  }
+  
+  public void testNestedSpans() throws Exception {
+    SpanTermQuery stq;
+    Spans spans;
+    IndexSearcher searcher = getSearcher();
+    stq = new SpanTermQuery(new Term(PayloadHelper.FIELD, "mark"));
+    spans = stq.getSpans(searcher.getIndexReader());
+    assertTrue("spans is null and it shouldn't be", spans != null);
+    checkSpans(spans, 0, null);
+
+
+    SpanQuery[] clauses = new SpanQuery[3];
+    clauses[0] = new SpanTermQuery(new Term(PayloadHelper.FIELD, "rr"));
+    clauses[1] = new SpanTermQuery(new Term(PayloadHelper.FIELD, "yy"));
+    clauses[2] = new SpanTermQuery(new Term(PayloadHelper.FIELD, "xx"));
+    SpanNearQuery spanNearQuery = new SpanNearQuery(clauses, 12, false);
+
+    spans = spanNearQuery.getSpans(searcher.getIndexReader());
+    assertTrue("spans is null and it shouldn't be", spans != null);
+    checkSpans(spans, 2, new int[]{3,3});
+
+     
+    clauses[0] = new SpanTermQuery(new Term(PayloadHelper.FIELD, "xx"));
+    clauses[1] = new SpanTermQuery(new Term(PayloadHelper.FIELD, "rr"));
+    clauses[2] = new SpanTermQuery(new Term(PayloadHelper.FIELD, "yy"));
+
+    spanNearQuery = new SpanNearQuery(clauses, 6, true);
+   
+    
+    spans = spanNearQuery.getSpans(searcher.getIndexReader());
+    assertTrue("spans is null and it shouldn't be", spans != null);
+    checkSpans(spans, 1, new int[]{3});
+     
+    clauses = new SpanQuery[2];
+     
+    clauses[0] = new SpanTermQuery(new Term(PayloadHelper.FIELD, "xx"));
+    clauses[1] = new SpanTermQuery(new Term(PayloadHelper.FIELD, "rr"));
+
+    spanNearQuery = new SpanNearQuery(clauses, 6, true);
+     
+    // xx within 6 of rr
+    
+    SpanQuery[] clauses2 = new SpanQuery[2];
+     
+    clauses2[0] = new SpanTermQuery(new Term(PayloadHelper.FIELD, "yy"));
+    clauses2[1] = spanNearQuery;
+     
+    SpanNearQuery nestedSpanNearQuery = new SpanNearQuery(clauses2, 6, false);
+    
+    // yy within 6 of xx within 6 of rr
+
+    spans = nestedSpanNearQuery.getSpans(searcher.getIndexReader());
+    assertTrue("spans is null and it shouldn't be", spans != null);
+    checkSpans(spans, 2, new int[]{3,3});
+    searcher.close();
+    closeIndexReader.close();
+    directory.close();
+  }
+  
+  public void testFirstClauseWithoutPayload() throws Exception {
+    Spans spans;
+    IndexSearcher searcher = getSearcher();
+
+    SpanQuery[] clauses = new SpanQuery[3];
+    clauses[0] = new SpanTermQuery(new Term(PayloadHelper.FIELD, "nopayload"));
+    clauses[1] = new SpanTermQuery(new Term(PayloadHelper.FIELD, "qq"));
+    clauses[2] = new SpanTermQuery(new Term(PayloadHelper.FIELD, "ss"));
+
+    SpanNearQuery spanNearQuery = new SpanNearQuery(clauses, 6, true);
+    
+    SpanQuery[] clauses2 = new SpanQuery[2];
+     
+    clauses2[0] = new SpanTermQuery(new Term(PayloadHelper.FIELD, "pp"));
+    clauses2[1] = spanNearQuery;
+
+    SpanNearQuery snq = new SpanNearQuery(clauses2, 6, false);
+    
+    SpanQuery[] clauses3 = new SpanQuery[2];
+     
+    clauses3[0] = new SpanTermQuery(new Term(PayloadHelper.FIELD, "np"));
+    clauses3[1] = snq;
+     
+    SpanNearQuery nestedSpanNearQuery = new SpanNearQuery(clauses3, 6, false);
+
+    spans = nestedSpanNearQuery.getSpans(searcher.getIndexReader());
+    assertTrue("spans is null and it shouldn't be", spans != null);
+    checkSpans(spans, 1, new int[]{3});
+    searcher.close();
+    closeIndexReader.close();
+    directory.close();
+  }
+  
+  public void testHeavilyNestedSpanQuery() throws Exception {
+    Spans spans;
+    IndexSearcher searcher = getSearcher();
+
+    SpanQuery[] clauses = new SpanQuery[3];
+    clauses[0] = new SpanTermQuery(new Term(PayloadHelper.FIELD, "one"));
+    clauses[1] = new SpanTermQuery(new Term(PayloadHelper.FIELD, "two"));
+    clauses[2] = new SpanTermQuery(new Term(PayloadHelper.FIELD, "three"));
+
+    SpanNearQuery spanNearQuery = new SpanNearQuery(clauses, 5, true);
+   
+    clauses = new SpanQuery[3];
+    clauses[0] = spanNearQuery; 
+    clauses[1] = new SpanTermQuery(new Term(PayloadHelper.FIELD, "five"));
+    clauses[2] = new SpanTermQuery(new Term(PayloadHelper.FIELD, "six"));
+
+    SpanNearQuery spanNearQuery2 = new SpanNearQuery(clauses, 6, true);
+     
+    SpanQuery[] clauses2 = new SpanQuery[2];
+    clauses2[0] = new SpanTermQuery(new Term(PayloadHelper.FIELD, "eleven"));
+    clauses2[1] = new SpanTermQuery(new Term(PayloadHelper.FIELD, "ten"));
+    SpanNearQuery spanNearQuery3 = new SpanNearQuery(clauses2, 2, false);
+    
+    SpanQuery[] clauses3 = new SpanQuery[3];
+    clauses3[0] = new SpanTermQuery(new Term(PayloadHelper.FIELD, "nine"));
+    clauses3[1] = spanNearQuery2;
+    clauses3[2] = spanNearQuery3;
+     
+    SpanNearQuery nestedSpanNearQuery = new SpanNearQuery(clauses3, 6, false);
+
+    spans = nestedSpanNearQuery.getSpans(searcher.getIndexReader());
+    assertTrue("spans is null and it shouldn't be", spans != null);
+    checkSpans(spans, 2, new int[]{8, 8});
+    searcher.close();
+    closeIndexReader.close();
+    directory.close();
+  }
+  
+  public void testShrinkToAfterShortestMatch() throws CorruptIndexException,
+      LockObtainFailedException, IOException {
+    Directory directory = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random, directory,
+                                                     newIndexWriterConfig(TEST_VERSION_CURRENT, new TestPayloadAnalyzer()));
+
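+    // Index a single document in which "a" is immediately followed by "k"; the ordered
+    // SpanNearQuery below should surface exactly the payloads of those two tokens.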
+    Document doc = new Document();
+    doc.add(new Field("content", new StringReader("a b c d e f g h i j a k")));
+    writer.addDocument(doc);
+
+    IndexReader reader = writer.getReader();
+    IndexSearcher is = newSearcher(reader);
+    writer.close();
+
+    SpanTermQuery stq1 = new SpanTermQuery(new Term("content", "a"));
+    SpanTermQuery stq2 = new SpanTermQuery(new Term("content", "k"));
+    SpanQuery[] sqs = { stq1, stq2 };
+    SpanNearQuery snq = new SpanNearQuery(sqs, 1, true);
+    Spans spans = snq.getSpans(is.getIndexReader());
+
+    TopDocs topDocs = is.search(snq, 1);
+    Set<String> payloadSet = new HashSet<String>();
+    for (int i = 0; i < topDocs.scoreDocs.length; i++) {
+      while (spans.next()) {
+        Collection<byte[]> payloads = spans.getPayload();
+
+        for (final byte [] payload : payloads) {
+          payloadSet.add(new String(payload));
+        }
+      }
+    }
+    assertEquals(2, payloadSet.size());
+    assertTrue(payloadSet.contains("a:Noise:10"));
+    assertTrue(payloadSet.contains("k:Noise:11"));
+    is.close();
+    reader.close();
+    directory.close();
+  }
+  
+  public void testShrinkToAfterShortestMatch2() throws CorruptIndexException,
+      LockObtainFailedException, IOException {
+    Directory directory = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random, directory,
+                                                     newIndexWriterConfig(TEST_VERSION_CURRENT, new TestPayloadAnalyzer()));
+
+    Document doc = new Document();
+    doc.add(new Field("content", new StringReader("a b a d k f a h i k a k")));
+    writer.addDocument(doc);
+    IndexReader reader = writer.getReader();
+    IndexSearcher is = newSearcher(reader);
+    writer.close();
+
+    SpanTermQuery stq1 = new SpanTermQuery(new Term("content", "a"));
+    SpanTermQuery stq2 = new SpanTermQuery(new Term("content", "k"));
+    SpanQuery[] sqs = { stq1, stq2 };
+    SpanNearQuery snq = new SpanNearQuery(sqs, 0, true);
+    Spans spans = snq.getSpans(is.getIndexReader());
+
+    TopDocs topDocs = is.search(snq, 1);
+    Set<String> payloadSet = new HashSet<String>();
+    for (int i = 0; i < topDocs.scoreDocs.length; i++) {
+      while (spans.next()) {
+        Collection<byte[]> payloads = spans.getPayload();
+        for (final byte[] payload : payloads) {
+          payloadSet.add(new String(payload));
+        }
+      }
+    }
+    assertEquals(2, payloadSet.size());
+    assertTrue(payloadSet.contains("a:Noise:10"));
+    assertTrue(payloadSet.contains("k:Noise:11"));
+    is.close();
+    reader.close();
+    directory.close();
+  }
+  
+  public void testShrinkToAfterShortestMatch3() throws CorruptIndexException,
+      LockObtainFailedException, IOException {
+    Directory directory = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random, directory,
+                                                     newIndexWriterConfig(TEST_VERSION_CURRENT, new TestPayloadAnalyzer()));
+
+    Document doc = new Document();
+    doc.add(new Field("content", new StringReader("j k a l f k k p a t a k l k t a")));
+    writer.addDocument(doc);
+    IndexReader reader = writer.getReader();
+    IndexSearcher is = newSearcher(reader);
+    writer.close();
+
+    SpanTermQuery stq1 = new SpanTermQuery(new Term("content", "a"));
+    SpanTermQuery stq2 = new SpanTermQuery(new Term("content", "k"));
+    SpanQuery[] sqs = { stq1, stq2 };
+    SpanNearQuery snq = new SpanNearQuery(sqs, 0, true);
+    Spans spans = snq.getSpans(is.getIndexReader());
+
+    TopDocs topDocs = is.search(snq, 1);
+    Set<String> payloadSet = new HashSet<String>();
+    for (int i = 0; i < topDocs.scoreDocs.length; i++) {
+      while (spans.next()) {
+        Collection<byte[]> payloads = spans.getPayload();
+
+        for (final byte [] payload : payloads) {
+          payloadSet.add(new String(payload));
+        }
+      }
+    }
+    assertEquals(2, payloadSet.size());
+    if(VERBOSE) {
+      for (final String payload : payloadSet)
+        System.out.println("match:" +  payload);
+      
+    }
+    assertTrue(payloadSet.contains("a:Noise:10"));
+    assertTrue(payloadSet.contains("k:Noise:11"));
+    is.close();
+    reader.close();
+    directory.close();
+  }
+  
+  public void testPayloadSpanUtil() throws Exception {
+    Directory directory = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random, directory,
+                                                     newIndexWriterConfig(TEST_VERSION_CURRENT, new PayloadAnalyzer()).setSimilarity(similarity));
+
+    Document doc = new Document();
+    doc.add(newField(PayloadHelper.FIELD,"xx rr yy mm  pp", Field.Store.YES, Field.Index.ANALYZED));
+    writer.addDocument(doc);
+  
+    IndexReader reader = writer.getReader();
+    writer.close();
+    IndexSearcher searcher = newSearcher(reader);
+
+    PayloadSpanUtil psu = new PayloadSpanUtil(searcher.getIndexReader());
+    
+    Collection<byte[]> payloads = psu.getPayloadsForQuery(new TermQuery(new Term(PayloadHelper.FIELD, "rr")));
+    if(VERBOSE)
+      System.out.println("Num payloads:" + payloads.size());
+    for (final byte [] bytes : payloads) {
+      if(VERBOSE)
+        System.out.println(new String(bytes));
+    }
+    searcher.close();
+    reader.close();
+    directory.close();
+  }
+
+  private void checkSpans(Spans spans, int expectedNumSpans, int expectedNumPayloads,
+                          int expectedPayloadLength, int expectedFirstByte) throws IOException {
+    assertTrue("spans is null and it shouldn't be", spans != null);
+    //Each position match should have a span associated with it; since there is just one
+    //underlying term query, there should only be one entry in the span.
+    int seen = 0;
+    while (spans.next() == true)
+    {
+      //if we expect payloads, then isPayloadAvailable should be true
+      if (expectedNumPayloads > 0) {
+        assertTrue("isPayloadAvailable is not returning the correct value: " + spans.isPayloadAvailable()
+                + " and it should be: " + (expectedNumPayloads >  0),
+                spans.isPayloadAvailable() == true);
+      } else {
+        assertTrue("isPayloadAvailable should be false", spans.isPayloadAvailable() == false);
+      }
+      //See PayloadHelper: for the PayloadHelper.FIELD field, there is a single-byte payload at every token
+      if (spans.isPayloadAvailable()) {
+        Collection<byte[]> payload = spans.getPayload();
+        assertTrue("payload Size: " + payload.size() + " is not: " + expectedNumPayloads, payload.size() == expectedNumPayloads);
+        for (final byte [] thePayload : payload) {
+          assertTrue("payload[0] Size: " + thePayload.length + " is not: " + expectedPayloadLength,
+                  thePayload.length == expectedPayloadLength);
+          assertTrue(thePayload[0] + " does not equal: " + expectedFirstByte, thePayload[0] == expectedFirstByte);
+
+        }
+
+      }
+      seen++;
+    }
+    assertTrue(seen + " does not equal: " + expectedNumSpans, seen == expectedNumSpans);
+  }
+  
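+  /**
+   * Indexes a fixed set of small documents with PayloadAnalyzer and returns a searcher over
+   * them; the reader is kept in closeIndexReader so the calling test can close it afterwards.
+   */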
+  private IndexSearcher getSearcher() throws Exception {
+    directory = newDirectory();
+    String[] docs = new String[]{"xx rr yy mm  pp","xx yy mm rr pp", "nopayload qq ss pp np", "one two three four five six seven eight nine ten eleven", "nine one two three four five six seven eight eleven ten"};
+    RandomIndexWriter writer = new RandomIndexWriter(random, directory,
+                                                     newIndexWriterConfig(TEST_VERSION_CURRENT, new PayloadAnalyzer()).setSimilarity(similarity));
+
+    Document doc = null;
+    for(int i = 0; i < docs.length; i++) {
+      doc = new Document();
+      String docText = docs[i];
+      doc.add(newField(PayloadHelper.FIELD,docText, Field.Store.YES, Field.Index.ANALYZED));
+      writer.addDocument(doc);
+    }
+
+    closeIndexReader = writer.getReader();
+    writer.close();
+
+    IndexSearcher searcher = newSearcher(closeIndexReader);
+    return searcher;
+  }
+  
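+  /** Asserts the total number of spans and, for each span, the number of payloads given in numPayloads. */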
+  private void checkSpans(Spans spans, int numSpans, int[] numPayloads) throws IOException {
+    int cnt = 0;
+
+    while (spans.next() == true) {
+      if(VERBOSE)
+        System.out.println("\nSpans Dump --");
+      if (spans.isPayloadAvailable()) {
+        Collection<byte[]> payload = spans.getPayload();
+        if(VERBOSE)
+          System.out.println("payloads for span:" + payload.size());
+        for (final byte [] bytes : payload) {
+          if(VERBOSE)
+            System.out.println("doc:" + spans.doc() + " s:" + spans.start() + " e:" + spans.end() + " "
+              + new String(bytes));
+        }
+
+        assertEquals(numPayloads[cnt],payload.size());
+      } else {
+        assertFalse("Expected spans:" + numPayloads[cnt] + " found: 0",numPayloads.length > 0 && numPayloads[cnt] > 0 );
+      }
+      cnt++;
+    }
+
+    assertEquals(numSpans, cnt);
+  }
+
+  final class PayloadAnalyzer extends Analyzer {
+
+    @Override
+    public TokenStream tokenStream(String fieldName, Reader reader) {
+      TokenStream result = new LowerCaseTokenizer(TEST_VERSION_CURRENT, reader);
+      result = new PayloadFilter(result, fieldName);
+      return result;
+    }
+  }
+
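+  /**
+   * Adds a payload to every token except those in the "nopayload" set: tokens in the
+   * "entities" set get "token:Entity:pos", all others get "token:Noise:pos", where pos
+   * is the token's accumulated position.
+   */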
+  final class PayloadFilter extends TokenFilter {
+    String fieldName;
+    int numSeen = 0;
+    Set<String> entities = new HashSet<String>();
+    Set<String> nopayload = new HashSet<String>();
+    int pos;
+    PayloadAttribute payloadAtt;
+    CharTermAttribute termAtt;
+    PositionIncrementAttribute posIncrAtt;
+
+    public PayloadFilter(TokenStream input, String fieldName) {
+      super(input);
+      this.fieldName = fieldName;
+      pos = 0;
+      entities.add("xx");
+      entities.add("one");
+      nopayload.add("nopayload");
+      nopayload.add("np");
+      termAtt = addAttribute(CharTermAttribute.class);
+      posIncrAtt = addAttribute(PositionIncrementAttribute.class);
+      payloadAtt = addAttribute(PayloadAttribute.class);
+    }
+
+    @Override
+    public boolean incrementToken() throws IOException {
+      if (input.incrementToken()) {
+        String token = termAtt.toString();
+
+        if (!nopayload.contains(token)) {
+          if (entities.contains(token)) {
+            payloadAtt.setPayload(new Payload((token + ":Entity:"+ pos ).getBytes()));
+          } else {
+            payloadAtt.setPayload(new Payload((token + ":Noise:" + pos ).getBytes()));
+          }
+        }
+        pos += posIncrAtt.getPositionIncrement();
+        return true;
+      }
+      return false;
+    }
+  }
+  
+  public final class TestPayloadAnalyzer extends Analyzer {
+
+    @Override
+    public TokenStream tokenStream(String fieldName, Reader reader) {
+      TokenStream result = new LowerCaseTokenizer(TEST_VERSION_CURRENT, reader);
+      result = new PayloadFilter(result, fieldName);
+      return result;
+    }
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/spans/TestSpanExplanations.java b/lucene/backwards/src/test/org/apache/lucene/search/spans/TestSpanExplanations.java
new file mode 100644
index 0000000..c5da9d5
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/spans/TestSpanExplanations.java
@@ -0,0 +1,177 @@
+package org.apache.lucene.search.spans;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.search.*;
+
+
+/**
+ * TestExplanations subclass focusing on span queries
+ */
+public class TestSpanExplanations extends TestExplanations {
+
+  /* simple SpanTermQueries */
+  
+  public void testST1() throws Exception {
+    SpanQuery q = st("w1");
+    qtest(q, new int[] {0,1,2,3});
+  }
+  public void testST2() throws Exception {
+    SpanQuery q = st("w1");
+    q.setBoost(1000);
+    qtest(q, new int[] {0,1,2,3});
+  }
+  public void testST4() throws Exception {
+    SpanQuery q = st("xx");
+    qtest(q, new int[] {2,3});
+  }
+  public void testST5() throws Exception {
+    SpanQuery q = st("xx");
+    q.setBoost(1000);
+    qtest(q, new int[] {2,3});
+  }
+
+  /* some SpanFirstQueries */
+  
+  public void testSF1() throws Exception {
+    SpanQuery q = sf(("w1"),1);
+    qtest(q, new int[] {0,1,2,3});
+  }
+  public void testSF2() throws Exception {
+    SpanQuery q = sf(("w1"),1);
+    q.setBoost(1000);
+    qtest(q, new int[] {0,1,2,3});
+  }
+  public void testSF4() throws Exception {
+    SpanQuery q = sf(("xx"),2);
+    qtest(q, new int[] {2});
+  }
+  public void testSF5() throws Exception {
+    SpanQuery q = sf(("yy"),2);
+    qtest(q, new int[] { });
+  }
+  public void testSF6() throws Exception {
+    SpanQuery q = sf(("yy"),4);
+    q.setBoost(1000);
+    qtest(q, new int[] {2});
+  }
+  
+  /* some SpanOrQueries */
+
+  public void testSO1() throws Exception {
+    SpanQuery q = sor("w1","QQ");
+    qtest(q, new int[] {0,1,2,3});
+  }
+  public void testSO2() throws Exception {
+    SpanQuery q = sor("w1","w3","zz");
+    qtest(q, new int[] {0,1,2,3});
+  }
+  public void testSO3() throws Exception {
+    SpanQuery q = sor("w5","QQ","yy");
+    qtest(q, new int[] {0,2,3});
+  }
+  public void testSO4() throws Exception {
+    SpanQuery q = sor("w5","QQ","yy");
+    qtest(q, new int[] {0,2,3});
+  }
+
+  
+  
+  /* some SpanNearQueries */
+  
+  public void testSNear1() throws Exception {
+    SpanQuery q = snear("w1","QQ",100,true);
+    qtest(q, new int[] {});
+  }
+  public void testSNear2() throws Exception {
+    SpanQuery q = snear("w1","xx",100,true);
+    qtest(q, new int[] {2,3});
+  }
+  public void testSNear3() throws Exception {
+    SpanQuery q = snear("w1","xx",0,true);
+    qtest(q, new int[] {2});
+  }
+  public void testSNear4() throws Exception {
+    SpanQuery q = snear("w1","xx",1,true);
+    qtest(q, new int[] {2,3});
+  }
+  public void testSNear5() throws Exception {
+    SpanQuery q = snear("xx","w1",0,false);
+    qtest(q, new int[] {2});
+  }
+
+  public void testSNear6() throws Exception {
+    SpanQuery q = snear("w1","w2","QQ",100,true);
+    qtest(q, new int[] {});
+  }
+  public void testSNear7() throws Exception {
+    SpanQuery q = snear("w1","xx","w2",100,true);
+    qtest(q, new int[] {2,3});
+  }
+  public void testSNear8() throws Exception {
+    SpanQuery q = snear("w1","xx","w2",0,true);
+    qtest(q, new int[] {2});
+  }
+  public void testSNear9() throws Exception {
+    SpanQuery q = snear("w1","xx","w2",1,true);
+    qtest(q, new int[] {2,3});
+  }
+  public void testSNear10() throws Exception {
+    SpanQuery q = snear("xx","w1","w2",0,false);
+    qtest(q, new int[] {2});
+  }
+  public void testSNear11() throws Exception {
+    SpanQuery q = snear("w1","w2","w3",1,true);
+    qtest(q, new int[] {0,1});
+  }
+
+  
+  /* some SpanNotQueries */
+
+  public void testSNot1() throws Exception {
+    SpanQuery q = snot(sf("w1",10),st("QQ"));
+    qtest(q, new int[] {0,1,2,3});
+  }
+  public void testSNot2() throws Exception {
+    SpanQuery q = snot(sf("w1",10),st("QQ"));
+    q.setBoost(1000);
+    qtest(q, new int[] {0,1,2,3});
+  }
+  public void testSNot4() throws Exception {
+    SpanQuery q = snot(sf("w1",10),st("xx"));
+    qtest(q, new int[] {0,1,2,3});
+  }
+  public void testSNot5() throws Exception {
+    SpanQuery q = snot(sf("w1",10),st("xx"));
+    q.setBoost(1000);
+    qtest(q, new int[] {0,1,2,3});
+  }
+  public void testSNot7() throws Exception {
+    SpanQuery f = snear("w1","w3",10,true);
+    f.setBoost(1000);
+    SpanQuery q = snot(f, st("xx"));
+    qtest(q, new int[] {0,1,3});
+  }
+  public void testSNot10() throws Exception {
+    SpanQuery t = st("xx");
+    t.setBoost(10000);
+    SpanQuery q = snot(snear("w1","w3",10,true), t);
+    qtest(q, new int[] {0,1,3});
+  }
+
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/spans/TestSpanExplanationsOfNonMatches.java b/lucene/backwards/src/test/org/apache/lucene/search/spans/TestSpanExplanationsOfNonMatches.java
new file mode 100644
index 0000000..ed6406d
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/spans/TestSpanExplanationsOfNonMatches.java
@@ -0,0 +1,41 @@
+package org.apache.lucene.search.spans;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.CheckHits;
+
+
+/**
+ * Subclass of TestSpanExplanations that verifies non-matches.
+ */
+public class TestSpanExplanationsOfNonMatches
+  extends TestSpanExplanations {
+
+  /**
+   * Overrides superclass to ignore matches and focus on non-matches
+   *
+   * @see CheckHits#checkNoMatchExplanations
+   */
+  @Override
+  public void qtest(Query q, int[] expDocNrs) throws Exception {
+    CheckHits.checkNoMatchExplanations(q, FIELD, searcher, expDocNrs);
+  }
+    
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/spans/TestSpanFirstQuery.java b/lucene/backwards/src/test/org/apache/lucene/search/spans/TestSpanFirstQuery.java
new file mode 100644
index 0000000..f91998d
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/spans/TestSpanFirstQuery.java
@@ -0,0 +1,63 @@
+package org.apache.lucene.search.spans;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.StopAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+
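+/** Checks that SpanFirstQuery respects absolute start positions, including the gap left by a removed stopword. */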
+public class TestSpanFirstQuery extends LuceneTestCase {
+  public void testStartPositions() throws Exception {
+    Directory dir = newDirectory();
+    
+    // StopAnalyzer removes "the", leaving a position gap before "quick" in the first document
+    Analyzer analyzer = new StopAnalyzer(TEST_VERSION_CURRENT);
+    
+    RandomIndexWriter writer = new RandomIndexWriter(random, dir, analyzer);
+    Document doc = new Document();
+    doc.add(newField("field", "the quick brown fox", Field.Index.ANALYZED));
+    writer.addDocument(doc);
+    Document doc2 = new Document();
+    doc2.add(newField("field", "quick brown fox", Field.Index.ANALYZED));
+    writer.addDocument(doc2);
+    
+    IndexReader reader = writer.getReader();
+    IndexSearcher searcher = newSearcher(reader);
+    
+    // user queries on "starts-with quick"
+    SpanQuery sfq = new SpanFirstQuery(new SpanTermQuery(new Term("field", "quick")), 1);
+    assertEquals(1, searcher.search(sfq, 10).totalHits);
+    
+    // user queries on "starts-with the quick"
+    SpanQuery include = new SpanFirstQuery(new SpanTermQuery(new Term("field", "quick")), 2);
+    sfq = new SpanNotQuery(include, sfq);
+    assertEquals(1, searcher.search(sfq, 10).totalHits);
+    
+    writer.close();
+    searcher.close();
+    reader.close();
+    dir.close();
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/spans/TestSpanMultiTermQueryWrapper.java b/lucene/backwards/src/test/org/apache/lucene/search/spans/TestSpanMultiTermQueryWrapper.java
new file mode 100644
index 0000000..9470788
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/spans/TestSpanMultiTermQueryWrapper.java
@@ -0,0 +1,100 @@
+package org.apache.lucene.search.spans;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.FuzzyQuery;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Searcher;
+import org.apache.lucene.search.WildcardQuery;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+
+/**
+ * Tests for {@link SpanMultiTermQueryWrapper}, wrapping a few MultiTermQueries.
+ */
+public class TestSpanMultiTermQueryWrapper extends LuceneTestCase {
+  private Directory directory;
+  private IndexReader reader;
+  private Searcher searcher;
+  
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    directory = newDirectory();
+    RandomIndexWriter iw = new RandomIndexWriter(random, directory);
+    Document doc = new Document();
+    Field field = newField("field", "", Field.Store.NO, Field.Index.ANALYZED);
+    doc.add(field);
+    
+    field.setValue("quick brown fox");
+    iw.addDocument(doc);
+    field.setValue("jumps over lazy broun dog");
+    iw.addDocument(doc);
+    field.setValue("jumps over extremely very lazy broxn dog");
+    iw.addDocument(doc);
+    reader = iw.getReader();
+    iw.close();
+    searcher = newSearcher(reader);
+  }
+  
+  @Override
+  public void tearDown() throws Exception {
+    searcher.close();
+    reader.close();
+    directory.close();
+    super.tearDown();
+  }
+  
+  public void testWildcard() throws Exception {
+    WildcardQuery wq = new WildcardQuery(new Term("field", "bro?n"));
+    SpanQuery swq = new SpanMultiTermQueryWrapper<WildcardQuery>(wq);
+    // will only match quick brown fox
+    SpanFirstQuery sfq = new SpanFirstQuery(swq, 2);
+    assertEquals(1, searcher.search(sfq, 10).totalHits);
+  }
+  
+  public void testPrefix() throws Exception {
+    WildcardQuery wq = new WildcardQuery(new Term("field", "extrem*"));
+    SpanQuery swq = new SpanMultiTermQueryWrapper<WildcardQuery>(wq);
+    // will only match "jumps over extremely very lazy broxn dog"
+    SpanFirstQuery sfq = new SpanFirstQuery(swq, 3);
+    assertEquals(1, searcher.search(sfq, 10).totalHits);
+  }
+  
+  public void testFuzzy() throws Exception {
+    FuzzyQuery fq = new FuzzyQuery(new Term("field", "broan"));
+    SpanQuery sfq = new SpanMultiTermQueryWrapper<FuzzyQuery>(fq);
+    // will not match "quick brown fox": "brown" is at position 1, outside the range [3,6)
+    SpanPositionRangeQuery sprq = new SpanPositionRangeQuery(sfq, 3, 6);
+    assertEquals(2, searcher.search(sprq, 10).totalHits);
+  }
+  
+  public void testFuzzy2() throws Exception {
+    // maximum of 1 term expansion
+    FuzzyQuery fq = new FuzzyQuery(new Term("field", "broan"), 0.5f, 0, 1);
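+    // args: minimumSimilarity=0.5, prefixLength=0, maxExpansions=1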
+    SpanQuery sfq = new SpanMultiTermQueryWrapper<FuzzyQuery>(fq);
+    // will only match jumps over lazy broun dog
+    SpanPositionRangeQuery sprq = new SpanPositionRangeQuery(sfq, 0, 100);
+    assertEquals(1, searcher.search(sprq, 10).totalHits);
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/spans/TestSpans.java b/lucene/backwards/src/test/org/apache/lucene/search/spans/TestSpans.java
new file mode 100644
index 0000000..6610124
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/spans/TestSpans.java
@@ -0,0 +1,492 @@
+package org.apache.lucene.search.spans;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.CheckHits;
+import org.apache.lucene.search.Similarity;
+import org.apache.lucene.search.DefaultSimilarity;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.Searcher;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.util.LuceneTestCase;
+import java.io.IOException;
+
+public class TestSpans extends LuceneTestCase {
+  private IndexSearcher searcher;
+  private IndexReader reader;
+  private Directory directory;
+  
+  public static final String field = "field";
+
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    directory = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random, directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
+    for (int i = 0; i < docFields.length; i++) {
+      Document doc = new Document();
+      doc.add(newField(field, docFields[i], Field.Store.YES, Field.Index.ANALYZED));
+      writer.addDocument(doc);
+    }
+    reader = writer.getReader();
+    writer.close();
+    searcher = newSearcher(reader);
+  }
+  
+  @Override
+  public void tearDown() throws Exception {
+    searcher.close();
+    reader.close();
+    directory.close();
+    super.tearDown();
+  }
+  
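+  // index contents; the array index is the doc id that the assertions below refer to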
+  private String[] docFields = {
+    "w1 w2 w3 w4 w5",
+    "w1 w3 w2 w3",
+    "w1 xx w2 yy w3",
+    "w1 w3 xx w2 yy w3",
+    "u2 u2 u1",
+    "u2 xx u2 u1",
+    "u2 u2 xx u1",
+    "u2 xx u2 yy u1",
+    "u2 xx u1 u2",
+    "u2 u1 xx u2",
+    "u1 u2 xx u2",
+    "t1 t2 t1 t3 t2 t3"
+  };
+
+  public SpanTermQuery makeSpanTermQuery(String text) {
+    return new SpanTermQuery(new Term(field, text));
+  }
+  
+  private void checkHits(Query query, int[] results) throws IOException {
+    CheckHits.checkHits(random, query, field, searcher, results);
+  }
+  
+  private void orderedSlopTest3SQ(
+        SpanQuery q1,
+        SpanQuery q2,
+        SpanQuery q3,
+        int slop,
+        int[] expectedDocs) throws IOException {
+    boolean ordered = true;
+    SpanNearQuery snq = new SpanNearQuery( new SpanQuery[]{q1,q2,q3}, slop, ordered);
+    checkHits(snq, expectedDocs);
+  }
+  
+  public void orderedSlopTest3(int slop, int[] expectedDocs) throws IOException {
+    orderedSlopTest3SQ(
+       makeSpanTermQuery("w1"),
+       makeSpanTermQuery("w2"),
+       makeSpanTermQuery("w3"),
+       slop,
+       expectedDocs);
+  }
+  
+  public void orderedSlopTest3Equal(int slop, int[] expectedDocs) throws IOException {
+    orderedSlopTest3SQ(
+       makeSpanTermQuery("w1"),
+       makeSpanTermQuery("w3"),
+       makeSpanTermQuery("w3"),
+       slop,
+       expectedDocs);
+  }
+  
+  public void orderedSlopTest1Equal(int slop, int[] expectedDocs) throws IOException {
+    orderedSlopTest3SQ(
+       makeSpanTermQuery("u2"),
+       makeSpanTermQuery("u2"),
+       makeSpanTermQuery("u1"),
+       slop,
+       expectedDocs);
+  }
+  
+  public void testSpanNearOrdered01() throws Exception {
+    orderedSlopTest3(0, new int[] {0});
+  }
+
+  public void testSpanNearOrdered02() throws Exception {
+    orderedSlopTest3(1, new int[] {0,1});
+  }
+
+  public void testSpanNearOrdered03() throws Exception {
+    orderedSlopTest3(2, new int[] {0,1,2});
+  }
+
+  public void testSpanNearOrdered04() throws Exception {
+    orderedSlopTest3(3, new int[] {0,1,2,3});
+  }
+
+  public void testSpanNearOrdered05() throws Exception {
+    orderedSlopTest3(4, new int[] {0,1,2,3});
+  }
+  
+  public void testSpanNearOrderedEqual01() throws Exception {
+    orderedSlopTest3Equal(0, new int[] {});
+  }
+
+  public void testSpanNearOrderedEqual02() throws Exception {
+    orderedSlopTest3Equal(1, new int[] {1});
+  }
+
+  public void testSpanNearOrderedEqual03() throws Exception {
+    orderedSlopTest3Equal(2, new int[] {1});
+  }
+
+  public void testSpanNearOrderedEqual04() throws Exception {
+    orderedSlopTest3Equal(3, new int[] {1,3});
+  }
+  
+  public void testSpanNearOrderedEqual11() throws Exception {
+    orderedSlopTest1Equal(0, new int[] {4});
+  }
+  
+  public void testSpanNearOrderedEqual12() throws Exception {
+    orderedSlopTest1Equal(0, new int[] {4});
+  }
+  
+  public void testSpanNearOrderedEqual13() throws Exception {
+    orderedSlopTest1Equal(1, new int[] {4,5,6});
+  }
+  
+  public void testSpanNearOrderedEqual14() throws Exception {
+    orderedSlopTest1Equal(2, new int[] {4,5,6,7});
+  }
+
+  public void testSpanNearOrderedEqual15() throws Exception {
+    orderedSlopTest1Equal(3, new int[] {4,5,6,7});
+  }
+
+  public void testSpanNearOrderedOverlap() throws Exception {
+    boolean ordered = true;
+    int slop = 1;
+    SpanNearQuery snq = new SpanNearQuery(
+                              new SpanQuery[] {
+                                makeSpanTermQuery("t1"),
+                                makeSpanTermQuery("t2"),
+                                makeSpanTermQuery("t3") },
+                              slop,
+                              ordered);
+    Spans spans = snq.getSpans(searcher.getIndexReader());
+
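+    // doc 11 is "t1 t2 t1 t3 t2 t3": the ordered t1..t2..t3 query matches windows [0,4) and [2,6)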
+    assertTrue("first range", spans.next());
+    assertEquals("first doc", 11, spans.doc());
+    assertEquals("first start", 0, spans.start());
+    assertEquals("first end", 4, spans.end());
+
+    assertTrue("second range", spans.next());
+    assertEquals("second doc", 11, spans.doc());
+    assertEquals("second start", 2, spans.start());
+    assertEquals("second end", 6, spans.end());
+
+    assertFalse("third range", spans.next());
+  }
+
+
+  public void testSpanNearUnOrdered() throws Exception {
+
+    // See http://www.gossamer-threads.com/lists/lucene/java-dev/52270 for discussion about this test
+    SpanNearQuery snq;
+    snq = new SpanNearQuery(
+                              new SpanQuery[] {
+                                makeSpanTermQuery("u1"),
+                                makeSpanTermQuery("u2") },
+                              0,
+                              false);
+    Spans spans = snq.getSpans(searcher.getIndexReader());
+    assertTrue("Does not have next and it should", spans.next());
+    assertEquals("doc", 4, spans.doc());
+    assertEquals("start", 1, spans.start());
+    assertEquals("end", 3, spans.end());
+
+    assertTrue("Does not have next and it should", spans.next());
+    assertEquals("doc", 5, spans.doc());
+    assertEquals("start", 2, spans.start());
+    assertEquals("end", 4, spans.end());
+
+    assertTrue("Does not have next and it should", spans.next());
+    assertEquals("doc", 8, spans.doc());
+    assertEquals("start", 2, spans.start());
+    assertEquals("end", 4, spans.end());
+
+    assertTrue("Does not have next and it should", spans.next());
+    assertEquals("doc", 9, spans.doc());
+    assertEquals("start", 0, spans.start());
+    assertEquals("end", 2, spans.end());
+
+    assertTrue("Does not have next and it should", spans.next());
+    assertEquals("doc", 10, spans.doc());
+    assertEquals("start", 0, spans.start());
+    assertEquals("end", 2, spans.end());
+    assertTrue("Has next and it shouldn't: " + spans.doc(), spans.next() == false);
+
+    SpanNearQuery u1u2 = new SpanNearQuery(new SpanQuery[]{makeSpanTermQuery("u1"),
+                                makeSpanTermQuery("u2")}, 0, false);
+    snq = new SpanNearQuery(
+                              new SpanQuery[] {
+                                u1u2,
+                                makeSpanTermQuery("u2")
+                              },
+                              1,
+                              false);
+    spans = snq.getSpans(searcher.getIndexReader());
+    assertTrue("Does not have next and it should", spans.next());
+    assertEquals("doc", 4, spans.doc());
+    assertEquals("start", 0, spans.start());
+    assertEquals("end", 3, spans.end());
+
+    assertTrue("Does not have next and it should", spans.next());
+    //unordered spans can be subsets
+    assertEquals("doc", 4, spans.doc());
+    assertEquals("start", 1, spans.start());
+    assertEquals("end", 3, spans.end());
+
+    assertTrue("Does not have next and it should", spans.next());
+    assertEquals("doc", 5, spans.doc());
+    assertEquals("start", 0, spans.start());
+    assertEquals("end", 4, spans.end());
+
+    assertTrue("Does not have next and it should", spans.next());
+    assertEquals("doc", 5, spans.doc());
+    assertEquals("start", 2, spans.start());
+    assertEquals("end", 4, spans.end());
+
+    assertTrue("Does not have next and it should", spans.next());
+    assertEquals("doc", 8, spans.doc());
+    assertEquals("start", 0, spans.start());
+    assertEquals("end", 4, spans.end());
+
+
+    assertTrue("Does not have next and it should", spans.next());
+    assertEquals("doc", 8, spans.doc());
+    assertEquals("start", 2, spans.start());
+    assertEquals("end", 4, spans.end());
+
+    assertTrue("Does not have next and it should", spans.next());
+    assertEquals("doc", 9, spans.doc());
+    assertEquals("start", 0, spans.start());
+    assertEquals("end", 2, spans.end());
+
+    assertTrue("Does not have next and it should", spans.next());
+    assertEquals("doc", 9, spans.doc());
+    assertEquals("start", 0, spans.start());
+    assertEquals("end", 4, spans.end());
+
+    assertTrue("Does not have next and it should", spans.next());
+    assertEquals("doc", 10, spans.doc());
+    assertEquals("start", 0, spans.start());
+    assertEquals("end", 2, spans.end());
+
+    assertTrue("Has next and it shouldn't", spans.next() == false);
+  }
+
+
+
+  private Spans orSpans(String[] terms) throws Exception {
+    SpanQuery[] sqa = new SpanQuery[terms.length];
+    for (int i = 0; i < terms.length; i++) {
+      sqa[i] = makeSpanTermQuery(terms[i]);
+    }
+    return (new SpanOrQuery(sqa)).getSpans(searcher.getIndexReader());
+  }
+
+  private void tstNextSpans(Spans spans, int doc, int start, int end)
+  throws Exception {
+    assertTrue("next", spans.next());
+    assertEquals("doc", doc, spans.doc());
+    assertEquals("start", start, spans.start());
+    assertEquals("end", end, spans.end());
+  }
+
+  public void testSpanOrEmpty() throws Exception {
+    Spans spans = orSpans(new String[0]);
+    assertFalse("empty next", spans.next());
+
+    SpanOrQuery a = new SpanOrQuery( new SpanQuery[0] );
+    SpanOrQuery b = new SpanOrQuery( new SpanQuery[0] );
+    assertTrue("empty should equal", a.equals(b));
+  }
+
+  public void testSpanOrSingle() throws Exception {
+    Spans spans = orSpans(new String[] {"w5"});
+    tstNextSpans(spans, 0, 4, 5);
+    assertFalse("final next", spans.next());
+  }
+  
+  public void testSpanOrMovesForward() throws Exception {
+    Spans spans = orSpans(new String[] {"w1", "xx"});
+
+    spans.next();
+    int doc = spans.doc();
+    assertEquals(0, doc);
+    
+    spans.skipTo(0);
+    doc = spans.doc();
+    
+    // LUCENE-1583:
+    // according to Spans, a skipTo to the same doc or less
+    // should still call next() on the underlying Spans
+    assertEquals(1, doc);
+
+  }
+  
+  public void testSpanOrDouble() throws Exception {
+    Spans spans = orSpans(new String[] {"w5", "yy"});
+    tstNextSpans(spans, 0, 4, 5);
+    tstNextSpans(spans, 2, 3, 4);
+    tstNextSpans(spans, 3, 4, 5);
+    tstNextSpans(spans, 7, 3, 4);
+    assertFalse("final next", spans.next());
+  }
+
+  public void testSpanOrDoubleSkip() throws Exception {
+    Spans spans = orSpans(new String[] {"w5", "yy"});
+    assertTrue("initial skipTo", spans.skipTo(3));
+    assertEquals("doc", 3, spans.doc());
+    assertEquals("start", 4, spans.start());
+    assertEquals("end", 5, spans.end());
+    tstNextSpans(spans, 7, 3, 4);
+    assertFalse("final next", spans.next());
+  }
+
+  public void testSpanOrUnused() throws Exception {
+    Spans spans = orSpans(new String[] {"w5", "unusedTerm", "yy"});
+    tstNextSpans(spans, 0, 4, 5);
+    tstNextSpans(spans, 2, 3, 4);
+    tstNextSpans(spans, 3, 4, 5);
+    tstNextSpans(spans, 7, 3, 4);
+    assertFalse("final next", spans.next());
+  }
+
+  public void testSpanOrTripleSameDoc() throws Exception {
+    Spans spans = orSpans(new String[] {"t1", "t2", "t3"});
+    tstNextSpans(spans, 11, 0, 1);
+    tstNextSpans(spans, 11, 1, 2);
+    tstNextSpans(spans, 11, 2, 3);
+    tstNextSpans(spans, 11, 3, 4);
+    tstNextSpans(spans, 11, 4, 5);
+    tstNextSpans(spans, 11, 5, 6);
+    assertFalse("final next", spans.next());
+  }
+
+  public void testSpanScorerZeroSloppyFreq() throws Exception {
+    boolean ordered = true;
+    int slop = 1;
+
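+    // a Similarity whose sloppyFreq is always 0 should produce a span score of exactly 0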
+    final Similarity sim = new DefaultSimilarity() {
+      @Override
+      public float sloppyFreq(int distance) {
+        return 0.0f;
+      }
+    };
+
+    SpanNearQuery snq = new SpanNearQuery(
+                              new SpanQuery[] {
+                                makeSpanTermQuery("t1"),
+                                makeSpanTermQuery("t2") },
+                              slop,
+                              ordered) {
+      @Override
+      public Similarity getSimilarity(Searcher s) {
+        return sim;
+      }
+      };
+
+    Scorer spanScorer = searcher.createNormalizedWeight(snq).scorer(searcher.getIndexReader(), true, false);
+
+    assertTrue("first doc", spanScorer.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
+    assertEquals("first doc number", spanScorer.docID(), 11);
+    float score = spanScorer.score();
+    assertTrue("first doc score should be zero, " + score, score == 0.0f);
+    assertTrue("no second doc", spanScorer.nextDoc() == DocIdSetIterator.NO_MORE_DOCS);
+  }
+
+  // LUCENE-1404
+  private void addDoc(IndexWriter writer, String id, String text) throws IOException {
+    final Document doc = new Document();
+    doc.add( newField("id", id, Field.Store.YES, Field.Index.NOT_ANALYZED) );
+    doc.add( newField("text", text, Field.Store.YES, Field.Index.ANALYZED) );
+    writer.addDocument(doc);
+  }
+
+  // LUCENE-1404
+  private int hitCount(Searcher searcher, String word) throws Throwable {
+    return searcher.search(new TermQuery(new Term("text", word)), 10).totalHits;
+  }
+
+  // LUCENE-1404
+  private SpanQuery createSpan(String value) {
+    return new SpanTermQuery(new Term("text", value));
+  }                     
+  
+  // LUCENE-1404
+  private SpanQuery createSpan(int slop, boolean ordered, SpanQuery[] clauses) {
+    return new SpanNearQuery(clauses, slop, ordered);
+  }
+
+  // LUCENE-1404
+  private SpanQuery createSpan(int slop, boolean ordered, String term1, String term2) {
+    return createSpan(slop, ordered, new SpanQuery[] {createSpan(term1), createSpan(term2)});
+  }
+
+  // LUCENE-1404
+  public void testNPESpanQuery() throws Throwable {
+    final Directory dir = newDirectory();
+    final IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
+        TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+
+    // Add documents
+    addDoc(writer, "1", "the big dogs went running to the market");
+    addDoc(writer, "2", "the cat chased the mouse, then the cat ate the mouse quickly");
+    
+    // Commit
+    writer.close();
+
+    // Get searcher
+    final IndexReader reader = IndexReader.open(dir, true);
+    final IndexSearcher searcher = newSearcher(reader);
+
+    // Control (make sure docs indexed)
+    assertEquals(2, hitCount(searcher, "the"));
+    assertEquals(1, hitCount(searcher, "cat"));
+    assertEquals(1, hitCount(searcher, "dogs"));
+    assertEquals(0, hitCount(searcher, "rabbit"));
+
+    // Before LUCENE-1404 this query threw an exception (it shouldn't); it should return a single hit
+    assertEquals(1,
+                 searcher.search(createSpan(0, true,                                 
+                                            new SpanQuery[] {createSpan(4, false, "chased", "cat"),
+                                                             createSpan("ate")}), 10).totalHits);
+    searcher.close();
+    reader.close();
+    dir.close();
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/spans/TestSpansAdvanced.java b/lucene/backwards/src/test/org/apache/lucene/search/spans/TestSpansAdvanced.java
new file mode 100644
index 0000000..ce5c95f
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/spans/TestSpansAdvanced.java
@@ -0,0 +1,175 @@
+package org.apache.lucene.search.spans;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.util.LuceneTestCase;
+
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.*;
+import org.apache.lucene.store.Directory;
+
+/*******************************************************************************
+ * Tests a historical span query bug in Lucene: SpanTermQuery did not work
+ * correctly inside a BooleanQuery.
+ * 
+ */
+public class TestSpansAdvanced extends LuceneTestCase {
+  
+  // location of the index
+  protected Directory mDirectory;
+  protected IndexReader reader;
+  protected IndexSearcher searcher;
+  
+  // field names in the index
+  private final static String FIELD_ID = "ID";
+  protected final static String FIELD_TEXT = "TEXT";
+  
+  /**
+   * Initializes the tests by adding 4 identical documents to the index.
+   */
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    // create test index
+    mDirectory = newDirectory();
+    final RandomIndexWriter writer = new RandomIndexWriter(random,
+        mDirectory, newIndexWriterConfig(TEST_VERSION_CURRENT,
+                                         new StandardAnalyzer(TEST_VERSION_CURRENT)).setMergePolicy(newLogMergePolicy()));
+
+    addDocument(writer, "1", "I think it should work.");
+    addDocument(writer, "2", "I think it should work.");
+    addDocument(writer, "3", "I think it should work.");
+    addDocument(writer, "4", "I think it should work.");
+    reader = writer.getReader();
+    writer.close();
+    searcher = newSearcher(reader);
+  }
+  
+  @Override
+  public void tearDown() throws Exception {
+    searcher.close();
+    reader.close();
+    mDirectory.close();
+    mDirectory = null;
+    super.tearDown();
+  }
+  
+  /**
+   * Adds the document to the index.
+   * 
+   * @param writer the Lucene index writer
+   * @param id the unique id of the document
+   * @param text the text of the document
+   * @throws IOException
+   */
+  protected void addDocument(final RandomIndexWriter writer, final String id,
+      final String text) throws IOException {
+    
+    final Document document = new Document();
+    document.add(newField(FIELD_ID, id, Field.Store.YES,
+        Field.Index.NOT_ANALYZED));
+    document.add(newField(FIELD_TEXT, text, Field.Store.YES,
+        Field.Index.ANALYZED));
+    writer.addDocument(document);
+  }
+  
+  /**
+   * Tests two span queries.
+   * 
+   * @throws IOException
+   */
+  public void testBooleanQueryWithSpanQueries() throws IOException {
+    
+    doTestBooleanQueryWithSpanQueries(searcher, 0.3884282f);
+  }
+  
+  /**
+   * Tests two span queries.
+   * 
+   * @throws IOException
+   */
+  protected void doTestBooleanQueryWithSpanQueries(IndexSearcher s,
+      final float expectedScore) throws IOException {
+    
+    final Query spanQuery = new SpanTermQuery(new Term(FIELD_TEXT, "work"));
+    final BooleanQuery query = new BooleanQuery();
+    query.add(spanQuery, BooleanClause.Occur.MUST);
+    query.add(spanQuery, BooleanClause.Occur.MUST);
+    final String[] expectedIds = new String[] {"1", "2", "3", "4"};
+    final float[] expectedScores = new float[] {expectedScore, expectedScore,
+        expectedScore, expectedScore};
+    assertHits(s, query, "two span queries", expectedIds, expectedScores);
+  }
+  
+  /**
+   * Checks to see if the hits are what we expected.
+   * 
+   * @param query the query to execute
+   * @param description the description of the search
+   * @param expectedIds the expected document ids of the hits
+   * @param expectedScores the expected scores of the hits
+   * 
+   * @throws IOException
+   */
+  protected static void assertHits(Searcher s, Query query,
+      final String description, final String[] expectedIds,
+      final float[] expectedScores) throws IOException {
+    QueryUtils.check(random, query, s);
+    
+    final float tolerance = 1e-5f;
+    
+    // Hits hits = searcher.search(query);
+    // hits normalizes and throws things off if one score is greater than 1.0
+    TopDocs topdocs = s.search(query, null, 10000);
+    
+    /*****
+     * // display the hits System.out.println(hits.length() +
+     * " hits for search: \"" + description + '\"'); for (int i = 0; i <
+     * hits.length(); i++) { System.out.println("  " + FIELD_ID + ':' +
+     * hits.doc(i).get(FIELD_ID) + " (score:" + hits.score(i) + ')'); }
+     *****/
+    
+    // did we get the hits we expected
+    assertEquals(expectedIds.length, topdocs.totalHits);
+    for (int i = 0; i < topdocs.totalHits; i++) {
+      // System.out.println(i + " exp: " + expectedIds[i]);
+      // System.out.println(i + " field: " + hits.doc(i).get(FIELD_ID));
+      
+      int id = topdocs.scoreDocs[i].doc;
+      float score = topdocs.scoreDocs[i].score;
+      Document doc = s.doc(id);
+      assertEquals(expectedIds[i], doc.get(FIELD_ID));
+      boolean scoreEq = Math.abs(expectedScores[i] - score) < tolerance;
+      if (!scoreEq) {
+        System.out.println(i + " warning, expected score: " + expectedScores[i]
+            + ", actual " + score);
+        System.out.println(s.explain(query, id));
+      }
+      assertEquals(expectedScores[i], score, tolerance);
+      assertEquals(s.explain(query, id).getValue(), score, tolerance);
+    }
+  }
+  
+}
\ No newline at end of file
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/spans/TestSpansAdvanced2.java b/lucene/backwards/src/test/org/apache/lucene/search/spans/TestSpansAdvanced2.java
new file mode 100644
index 0000000..27c3ca1
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/search/spans/TestSpansAdvanced2.java
@@ -0,0 +1,124 @@
+package org.apache.lucene.search.spans;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.search.*;
+
+/*******************************************************************************
+ * Additional tests to make sure the patch does not break other SpanTermQuery
+ * functionality.
+ * 
+ */
+public class TestSpansAdvanced2 extends TestSpansAdvanced {
+  IndexSearcher searcher2;
+  IndexReader reader2;
+  
+  /**
+   * Initializes the tests by adding documents to the index.
+   */
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    
+    // create test index
+    final RandomIndexWriter writer = new RandomIndexWriter(random, mDirectory,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new StandardAnalyzer(TEST_VERSION_CURRENT))
+                                                           .setOpenMode(OpenMode.APPEND).setMergePolicy(newLogMergePolicy()));
+    addDocument(writer, "A", "Should we, could we, would we?");
+    addDocument(writer, "B", "It should.  Should it?");
+    addDocument(writer, "C", "It shouldn't.");
+    addDocument(writer, "D", "Should we, should we, should we.");
+    reader2 = writer.getReader();
+    writer.close();
+    
+    // re-open the searcher since we added more docs
+    searcher2 = newSearcher(reader2);
+  }
+  
+  @Override
+  public void tearDown() throws Exception {
+    searcher2.close();
+    reader2.close();
+    super.tearDown();
+  }
+  
+  /**
+   * Verifies that the index has the correct number of documents.
+   * 
+   * @throws Exception
+   */
+  public void testVerifyIndex() throws Exception {
+    final IndexReader reader = IndexReader.open(mDirectory, true);
+    assertEquals(8, reader.numDocs());
+    reader.close();
+  }
+  
+  /**
+   * Tests a single span query that matches multiple documents.
+   * 
+   * @throws IOException
+   */
+  public void testSingleSpanQuery() throws IOException {
+    
+    final Query spanQuery = new SpanTermQuery(new Term(FIELD_TEXT, "should"));
+    final String[] expectedIds = new String[] {"B", "D", "1", "2", "3", "4",
+        "A"};
+    final float[] expectedScores = new float[] {0.625f, 0.45927936f,
+        0.35355338f, 0.35355338f, 0.35355338f, 0.35355338f, 0.26516503f,};
+    assertHits(searcher2, spanQuery, "single span query", expectedIds,
+        expectedScores);
+  }
+  
+  /**
+   * Tests a boolean query combining two different span queries.
+   * 
+   * @throws IOException
+   */
+  public void testMultipleDifferentSpanQueries() throws IOException {
+    
+    final Query spanQuery1 = new SpanTermQuery(new Term(FIELD_TEXT, "should"));
+    final Query spanQuery2 = new SpanTermQuery(new Term(FIELD_TEXT, "we"));
+    final BooleanQuery query = new BooleanQuery();
+    query.add(spanQuery1, BooleanClause.Occur.MUST);
+    query.add(spanQuery2, BooleanClause.Occur.MUST);
+    final String[] expectedIds = new String[] {"D", "A"};
+    // these values were pre LUCENE-413
+    // final float[] expectedScores = new float[] { 0.93163157f, 0.20698164f };
+    final float[] expectedScores = new float[] {1.0191123f, 0.93163157f};
+    assertHits(searcher2, query, "multiple different span queries",
+        expectedIds, expectedScores);
+  }
+  
+  /**
+   * Tests two span queries.
+   * 
+   * @throws IOException
+   */
+  @Override
+  public void testBooleanQueryWithSpanQueries() throws IOException {
+    
+    doTestBooleanQueryWithSpanQueries(searcher2, 0.73500174f);
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/store/TestBufferedIndexInput.java b/lucene/backwards/src/test/org/apache/lucene/store/TestBufferedIndexInput.java
new file mode 100755
index 0000000..0a0bc45
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/store/TestBufferedIndexInput.java
@@ -0,0 +1,381 @@
+package org.apache.lucene.store;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Random;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.store.NIOFSDirectory.NIOFSIndexInput;
+import org.apache.lucene.store.SimpleFSDirectory.SimpleFSIndexInput;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util._TestUtil;
+import org.apache.lucene.util.ArrayUtil;
+
+public class TestBufferedIndexInput extends LuceneTestCase {
+  
+  private static void writeBytes(File aFile, long size) throws IOException{
+    OutputStream stream = null;
+    try {
+      stream = new FileOutputStream(aFile);
+      for (int i = 0; i < size; i++) {
+        stream.write(byten(i));  
+      }
+      stream.flush();
+    } finally {
+      if (stream != null) {
+        stream.close();
+      }
+    }
+  }
+
+  private static final long TEST_FILE_LENGTH = 100*1024;
+ 
+  // Call readByte() repeatedly, past the buffer boundary, and see that it
+  // is working as expected.
+  // Our input comes from a dynamically generated "file" - see
+  // MyBufferedIndexInput below.
+  public void testReadByte() throws Exception {
+    MyBufferedIndexInput input = new MyBufferedIndexInput();
+    for (int i = 0; i < BufferedIndexInput.BUFFER_SIZE * 10; i++) {
+      assertEquals(input.readByte(), byten(i));
+    }
+  }
+ 
+  // Call readBytes() repeatedly, with various chunk sizes (from 1 byte to
+  // larger than the buffer size), and see that it returns the bytes we expect.
+  // Our input comes from a dynamically generated "file" -
+  // see MyBufferedIndexInput below.
+  public void testReadBytes() throws Exception {
+    MyBufferedIndexInput input = new MyBufferedIndexInput();
+    runReadBytes(input, BufferedIndexInput.BUFFER_SIZE, random);
+
+    // This tests the workaround code for LUCENE-1566, where readBytesInternal
+    // reads in chunks to work around a JVM bug that incorrectly raises an OOM error
+    // when a large byte buffer is passed to a file read.
+    // NOTE: this only tests the chunked reads, NOT whether the bug is triggered.
+    //final int tmpFileSize = 1024 * 1024 * 5;
+    final int inputBufferSize = 128;
+    File tmpInputFile = _TestUtil.createTempFile("IndexInput", "tmpFile", TEMP_DIR);
+    tmpInputFile.deleteOnExit();
+    writeBytes(tmpInputFile, TEST_FILE_LENGTH);
+
+    // run test with chunk size of 10 bytes, using SimpleFSIndexInput
+    runReadBytesAndClose(new SimpleFSIndexInput(tmpInputFile,
+                                                inputBufferSize, 10), inputBufferSize, random);
+
+    // run test with chunk size of 10 bytes, using NIOFSIndexInput
+    runReadBytesAndClose(new NIOFSIndexInput(tmpInputFile,
+                                             inputBufferSize, 10), inputBufferSize, random);
+  }
+
+  private void runReadBytesAndClose(IndexInput input, int bufferSize, Random r)
+      throws IOException {
+    try {
+      runReadBytes(input, bufferSize, r);
+    } finally {
+      input.close();
+    }
+  }
+  
+  private void runReadBytes(IndexInput input, int bufferSize, Random r)
+      throws IOException {
+
+    int pos = 0;
+    // gradually increasing size:
+    for (int size = 1; size < bufferSize * 10; size = size + size / 200 + 1) {
+      checkReadBytes(input, size, pos);
+      pos += size;
+      if (pos >= TEST_FILE_LENGTH) {
+        // wrap
+        pos = 0;
+        input.seek(0L);
+      }
+    }
+    // wildly fluctuating size:
+    for (long i = 0; i < 100; i++) {
+      final int size = r.nextInt(10000);
+      checkReadBytes(input, 1+size, pos);
+      pos += 1+size;
+      if (pos >= TEST_FILE_LENGTH) {
+        // wrap
+        pos = 0;
+        input.seek(0L);
+      }
+    }
+    // constant small size (7 bytes):
+    for (int i = 0; i < bufferSize; i++) {
+      checkReadBytes(input, 7, pos);
+      pos += 7;
+      if (pos >= TEST_FILE_LENGTH) {
+        // wrap
+        pos = 0;
+        input.seek(0L);
+      }
+    }
+  }
+
+  private byte[] buffer = new byte[10];
+    
+  private void checkReadBytes(IndexInput input, int size, int pos) throws IOException{
+    // Just to see that "offset" is treated properly in readBytes(), we
+    // add an arbitrary offset at the beginning of the array
+    int offset = size % 10; // arbitrary
+    buffer = ArrayUtil.grow(buffer, offset+size);
+    assertEquals(pos, input.getFilePointer());
+    long left = TEST_FILE_LENGTH - input.getFilePointer();
+    if (left <= 0) {
+      return;
+    } else if (left < size) {
+      size = (int) left;
+    }
+    input.readBytes(buffer, offset, size);
+    assertEquals(pos+size, input.getFilePointer());
+    for(int i=0; i<size; i++) {
+      assertEquals("pos=" + i + " filepos=" + (pos+i), byten(pos+i), buffer[offset+i]);
+    }
+  }
+   
+  // This tests that attempts to readBytes() past an EOF will fail, while
+  // reads up to the EOF will succeed. The EOF is determined by the
+  // BufferedIndexInput's arbitrary length() value.
+  public void testEOF() throws Exception {
+    MyBufferedIndexInput input = new MyBufferedIndexInput(1024);
+    // see that we can read all the bytes at one go:
+    checkReadBytes(input, (int)input.length(), 0);
+    // go back and see that we can't read more than that, for small and
+    // large overflows:
+    int pos = (int)input.length()-10;
+    input.seek(pos);
+    checkReadBytes(input, 10, pos);
+    input.seek(pos);
+    try {
+      checkReadBytes(input, 11, pos);
+      fail("Block read past end of file");
+    } catch (IOException e) {
+      /* success */
+    }
+    input.seek(pos);
+    try {
+      checkReadBytes(input, 50, pos);
+      fail("Block read past end of file");
+    } catch (IOException e) {
+      /* success */
+    }
+    input.seek(pos);
+    try {
+      checkReadBytes(input, 100000, pos);
+      fail("Block read past end of file");
+    } catch (IOException e) {
+      /* success */
+    }
+  }
+
+    // byten emulates a file - byten(n) returns the n'th byte in that file.
+    // MyBufferedIndexInput reads this "file".
+    private static byte byten(long n){
+      return (byte)(n*n%256);
+    }
+    private static class MyBufferedIndexInput extends BufferedIndexInput {
+      private long pos;
+      private long len;
+      public MyBufferedIndexInput(long len){
+        this.len = len;
+        this.pos = 0;
+      }
+      public MyBufferedIndexInput(){
+        // an infinite file
+        this(Long.MAX_VALUE);
+      }
+      @Override
+      protected void readInternal(byte[] b, int offset, int length) throws IOException {
+        for(int i=offset; i<offset+length; i++)
+          b[i] = byten(pos++);
+      }
+
+      @Override
+      protected void seekInternal(long pos) throws IOException {
+        this.pos = pos;
+      }
+
+      @Override
+      public void close() throws IOException {
+      }
+
+      @Override
+      public long length() {
+        return len;
+      }
+    }
+
+    public void testSetBufferSize() throws IOException {
+      File indexDir = _TestUtil.getTempDir("testSetBufferSize");
+      MockFSDirectory dir = new MockFSDirectory(indexDir, random);
+      try {
+        IndexWriter writer = new IndexWriter(
+            dir,
+            new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+                setOpenMode(OpenMode.CREATE).
+                setMergePolicy(newLogMergePolicy(false))
+        );
+        for(int i=0;i<37;i++) {
+          Document doc = new Document();
+          doc.add(newField("content", "aaa bbb ccc ddd" + i, Field.Store.YES, Field.Index.ANALYZED));
+          doc.add(newField("id", "" + i, Field.Store.YES, Field.Index.ANALYZED));
+          writer.addDocument(doc);
+        }
+        writer.close();
+
+        dir.allIndexInputs.clear();
+
+        IndexReader reader = IndexReader.open(dir, false);
+        Term aaa = new Term("content", "aaa");
+        Term bbb = new Term("content", "bbb");
+        Term ccc = new Term("content", "ccc");
+        assertEquals(37, reader.docFreq(ccc));
+        reader.deleteDocument(0);
+        assertEquals(37, reader.docFreq(aaa));
+        dir.tweakBufferSizes();
+        reader.deleteDocument(4);
+        assertEquals(reader.docFreq(bbb), 37);
+        dir.tweakBufferSizes();
+
+        IndexSearcher searcher = newSearcher(reader);
+        ScoreDoc[] hits = searcher.search(new TermQuery(bbb), null, 1000).scoreDocs;
+        dir.tweakBufferSizes();
+        assertEquals(35, hits.length);
+        dir.tweakBufferSizes();
+        hits = searcher.search(new TermQuery(new Term("id", "33")), null, 1000).scoreDocs;
+        dir.tweakBufferSizes();
+        assertEquals(1, hits.length);
+        hits = searcher.search(new TermQuery(aaa), null, 1000).scoreDocs;
+        dir.tweakBufferSizes();
+        assertEquals(35, hits.length);
+        searcher.close();
+        reader.close();
+      } finally {
+        _TestUtil.rmDir(indexDir);
+      }
+    }
+
+    private static class MockFSDirectory extends Directory {
+
+      List<IndexInput> allIndexInputs = new ArrayList<IndexInput>();
+
+      Random rand;
+
+      private Directory dir;
+
+      public MockFSDirectory(File path, Random rand) throws IOException {
+        this.rand = rand;
+        lockFactory = NoLockFactory.getNoLockFactory();
+        dir = new SimpleFSDirectory(path, null);
+      }
+
+      @Override
+      public IndexInput openInput(String name) throws IOException {
+        return openInput(name, BufferedIndexInput.BUFFER_SIZE);
+      }
+
+      public void tweakBufferSizes() {
+        //int count = 0;
+        for (final IndexInput ip : allIndexInputs) {
+          BufferedIndexInput bii = (BufferedIndexInput) ip;
+          int bufferSize = 1024+Math.abs(rand.nextInt() % 32768);
+          bii.setBufferSize(bufferSize);
+          //count++;
+        }
+        //System.out.println("tweak'd " + count + " buffer sizes");
+      }
+      
+      @Override
+      public IndexInput openInput(String name, int bufferSize) throws IOException {
+        // Make random changes to buffer size
+        bufferSize = 1+Math.abs(rand.nextInt() % 10);
+        IndexInput f = dir.openInput(name, bufferSize);
+        allIndexInputs.add(f);
+        return f;
+      }
+
+      @Override
+      public IndexOutput createOutput(String name) throws IOException {
+        return dir.createOutput(name);
+      }
+
+      @Override
+      public void close() throws IOException {
+        dir.close();
+      }
+
+      @Override
+      public void deleteFile(String name)
+        throws IOException
+      {
+        dir.deleteFile(name);
+      }
+      @Override
+      @Deprecated
+      /*  @deprecated Lucene never uses this API; it will be
+       *  removed in 4.0. */
+      public void touchFile(String name)
+        throws IOException
+      {
+        dir.touchFile(name);
+      }
+      @Override
+      public long fileModified(String name)
+        throws IOException
+      {
+        return dir.fileModified(name);
+      }
+      @Override
+      public boolean fileExists(String name)
+        throws IOException
+      {
+        return dir.fileExists(name);
+      }
+      @Override
+      public String[] listAll()
+        throws IOException
+      {
+        return dir.listAll();
+      }
+
+      @Override
+      public long fileLength(String name) throws IOException {
+        return dir.fileLength(name);
+      }
+
+
+    }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/store/TestByteArrayDataInput.java b/lucene/backwards/src/test/org/apache/lucene/store/TestByteArrayDataInput.java
new file mode 100644
index 0000000..fb14c45
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/store/TestByteArrayDataInput.java
@@ -0,0 +1,33 @@
+package org.apache.lucene.store;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.util.LuceneTestCase;
+
+public class TestByteArrayDataInput extends LuceneTestCase {
+
+  public void testBasic() throws Exception {
+    byte[] bytes = new byte[] {1, 65};
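+    // {1, 65}: a VInt length of 1 followed by the single byte 65 ('A')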
+    ByteArrayDataInput in = new ByteArrayDataInput(bytes);
+    assertEquals("A", in.readString());
+
+    bytes = new byte[] {1, 1, 65};
+    in.reset(bytes, 1, 2);
+    assertEquals("A", in.readString());
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/store/TestCopyBytes.java b/lucene/backwards/src/test/org/apache/lucene/store/TestCopyBytes.java
new file mode 100644
index 0000000..08f41e4
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/store/TestCopyBytes.java
@@ -0,0 +1,107 @@
+package org.apache.lucene.store;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util._TestUtil;
+
+import org.junit.Test;
+
+public class TestCopyBytes extends LuceneTestCase {
+
+  private byte value(int idx) {
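+    // deterministic byte pattern, so the copied output can be verified byte-by-byte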
+    return (byte) ((idx%256) * (1+(idx/256)));
+  }
+
+
+  @Test
+  public void testCopyBytes() throws Exception {
+    int num = atLeast(10);
+    for(int iter=0;iter<num;iter++) {
+      Directory dir = newDirectory();
+      if (VERBOSE) {
+        System.out.println("TEST: iter=" + iter + " dir=" + dir);
+      }
+
+      // make random file
+      IndexOutput out = dir.createOutput("test");
+      byte[] bytes = new byte[_TestUtil.nextInt(random, 1, 77777)];
+      final int size = _TestUtil.nextInt(random, 1, 1777777);
+      int upto = 0;
+      int byteUpto = 0;
+      while(upto < size) {
+        bytes[byteUpto++] = value(upto);
+        upto++;
+        if (byteUpto == bytes.length) {
+          out.writeBytes(bytes, 0, bytes.length);
+          byteUpto = 0;
+        }
+      }
+
+      out.writeBytes(bytes, 0, byteUpto);
+      assertEquals(size, out.getFilePointer());
+      out.close();
+      assertEquals(size, dir.fileLength("test"));
+
+      // copy from test -> test2
+      final IndexInput in = dir.openInput("test");
+
+      out = dir.createOutput("test2");
+
+      upto = 0;
+      while(upto < size) {
+        if (random.nextBoolean()) {
+          out.writeByte(in.readByte());
+          upto++;
+        } else {
+          final int chunk = Math.min(_TestUtil.nextInt(random, 1, bytes.length), size-upto);
+          out.copyBytes(in, chunk);
+          upto += chunk;
+        }
+      }
+      assertEquals(size, upto);
+      out.close();
+      in.close();
+
+      // verify
+      IndexInput in2 = dir.openInput("test2");
+      upto = 0;
+      while(upto < size) {
+        if (random.nextBoolean()) {
+          final byte v = in2.readByte();
+          assertEquals(value(upto), v);
+          upto++;
+        } else {
+          final int limit = Math.min(_TestUtil.nextInt(random, 1, bytes.length), size-upto);
+          in2.readBytes(bytes, 0, limit);
+          for(int byteIdx=0;byteIdx<limit;byteIdx++) {
+            assertEquals(value(upto), bytes[byteIdx]);
+            upto++;
+          }
+        }
+      }
+      in2.close();
+
+      dir.deleteFile("test");
+      dir.deleteFile("test2");
+      
+      dir.close();
+    }
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/store/TestDirectory.java b/lucene/backwards/src/test/org/apache/lucene/store/TestDirectory.java
new file mode 100644
index 0000000..03e6f15
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/store/TestDirectory.java
@@ -0,0 +1,185 @@
+package org.apache.lucene.store;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util._TestUtil;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Arrays;
+
+public class TestDirectory extends LuceneTestCase {
+
+  public void testDetectClose() throws Throwable {
+    Directory[] dirs = new Directory[] { new RAMDirectory(), new SimpleFSDirectory(TEMP_DIR), new NIOFSDirectory(TEMP_DIR) };
+    for (Directory dir : dirs) {
+      dir.close();
+      try {
+        dir.createOutput("test");
+        fail("did not hit expected exception");
+      } catch (AlreadyClosedException ace) {
+      }
+    }
+  }
+
+
+  // Test that different instances of FSDirectory can coexist on the same
+  // path, can read, write, and lock files.
+  public void testDirectInstantiation() throws Exception {
+    File path = _TestUtil.getTempDir("testDirectInstantiation");
+
+    int sz = 3;
+    Directory[] dirs = new Directory[sz];
+
+    dirs[0] = new SimpleFSDirectory(path, null);
+    dirs[1] = new NIOFSDirectory(path, null);
+    dirs[2] = new MMapDirectory(path, null);
+
+    for (int i=0; i<sz; i++) {
+      Directory dir = dirs[i];
+      dir.ensureOpen();
+      String fname = "foo." + i;
+      String lockname = "foo" + i + ".lck";
+      IndexOutput out = dir.createOutput(fname);
+      out.writeByte((byte)i);
+      out.close();
+
+      for (int j=0; j<sz; j++) {
+        Directory d2 = dirs[j];
+        d2.ensureOpen();
+        assertTrue(d2.fileExists(fname));
+        assertEquals(1, d2.fileLength(fname));
+
+        // don't test read on MMapDirectory, since it can't really be
+        // closed and will cause a failure to delete the file.
+        if (d2 instanceof MMapDirectory) continue;
+        
+        IndexInput input = d2.openInput(fname);
+        assertEquals((byte)i, input.readByte());
+        input.close();
+      }
+
+      // delete with a different dir
+      dirs[(i+1)%sz].deleteFile(fname);
+
+      for (int j=0; j<sz; j++) {
+        Directory d2 = dirs[j];
+        assertFalse(d2.fileExists(fname));
+      }
+
+      Lock lock = dir.makeLock(lockname);
+      assertTrue(lock.obtain());
+
+      for (int j=0; j<sz; j++) {
+        Directory d2 = dirs[j];
+        Lock lock2 = d2.makeLock(lockname);
+        try {
+          assertFalse(lock2.obtain(1));
+        } catch (LockObtainFailedException e) {
+          // OK
+        }
+      }
+
+      lock.release();
+      
+      // now lock with different dir
+      lock = dirs[(i+1)%sz].makeLock(lockname);
+      assertTrue(lock.obtain());
+      lock.release();
+    }
+
+    for (int i=0; i<sz; i++) {
+      Directory dir = dirs[i];
+      dir.ensureOpen();
+      dir.close();
+      assertFalse(dir.isOpen);
+    }
+    
+    _TestUtil.rmDir(path);
+  }
+
+  // LUCENE-1464
+  public void testDontCreate() throws Throwable {
+    File path = new File(TEMP_DIR, "doesnotexist");
+    try {
+      assertTrue(!path.exists());
+      Directory dir = new SimpleFSDirectory(path, null);
+      assertTrue(!path.exists());
+      dir.close();
+    } finally {
+      _TestUtil.rmDir(path);
+    }
+  }
+
+  // LUCENE-1468
+  public void testRAMDirectoryFilter() throws IOException {
+    checkDirectoryFilter(new RAMDirectory());
+  }
+
+  // LUCENE-1468
+  public void testFSDirectoryFilter() throws IOException {
+    checkDirectoryFilter(newFSDirectory(_TestUtil.getTempDir("test")));
+  }
+
+  // LUCENE-1468
+  private void checkDirectoryFilter(Directory dir) throws IOException {
+    String name = "file";
+    try {
+      dir.createOutput(name).close();
+      assertTrue(dir.fileExists(name));
+      assertTrue(Arrays.asList(dir.listAll()).contains(name));
+    } finally {
+      dir.close();
+    }
+  }
+
+  // LUCENE-1468
+  public void testCopySubdir() throws Throwable {
+    File path = _TestUtil.getTempDir("testsubdir");
+    try {
+      path.mkdirs();
+      new File(path, "subdir").mkdirs();
+      Directory fsDir = new SimpleFSDirectory(path, null);
+      assertEquals(0, new RAMDirectory(fsDir).listAll().length);
+    } finally {
+      _TestUtil.rmDir(path);
+    }
+  }
+
+  // LUCENE-1468
+  public void testNotDirectory() throws Throwable {
+    File path = _TestUtil.getTempDir("testnotdir");
+    Directory fsDir = new SimpleFSDirectory(path, null);
+    try {
+      IndexOutput out = fsDir.createOutput("afile");
+      out.close();
+      assertTrue(fsDir.fileExists("afile"));
+      try {
+        new SimpleFSDirectory(new File(path, "afile"), null);
+        fail("did not hit expected exception");
+      } catch (NoSuchDirectoryException nsde) {
+        // Expected
+      }
+    } finally {
+      fsDir.close();
+      _TestUtil.rmDir(path);
+    }
+  }
+}
+
diff --git a/lucene/backwards/src/test/org/apache/lucene/store/TestFileSwitchDirectory.java b/lucene/backwards/src/test/org/apache/lucene/store/TestFileSwitchDirectory.java
new file mode 100644
index 0000000..66dcc8b
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/store/TestFileSwitchDirectory.java
@@ -0,0 +1,115 @@
+package org.apache.lucene.store;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Set;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.index.IndexFileNames;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.TestIndexWriterReader;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util._TestUtil;
+
+public class TestFileSwitchDirectory extends LuceneTestCase {
+  /**
+   * Test that doc store files are written to the primary directory and everything else to the secondary.
+   * @throws IOException
+   */
+  public void testBasic() throws IOException {
+    Set<String> fileExtensions = new HashSet<String>();
+    fileExtensions.add(IndexFileNames.FIELDS_EXTENSION);
+    fileExtensions.add(IndexFileNames.FIELDS_INDEX_EXTENSION);
+    
+    MockDirectoryWrapper primaryDir = new MockDirectoryWrapper(random, new RAMDirectory());
+    primaryDir.setCheckIndexOnClose(false); // only part of an index
+    MockDirectoryWrapper secondaryDir = new MockDirectoryWrapper(random, new RAMDirectory());
+    secondaryDir.setCheckIndexOnClose(false); // only part of an index
+    
+    FileSwitchDirectory fsd = new FileSwitchDirectory(fileExtensions, primaryDir, secondaryDir, true);
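+    // Files whose extension is in fileExtensions (fdt/fdx) are routed to
+    // primaryDir; all other files go to secondaryDir.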
+    IndexWriter writer = new IndexWriter(
+        fsd,
+        new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+            setMergePolicy(newLogMergePolicy(false))
+    );
+    TestIndexWriterReader.createIndexNoClose(true, "ram", writer);
+    IndexReader reader = IndexReader.open(writer, true);
+    assertEquals(100, reader.maxDoc());
+    writer.commit();
+    // we should see only fdx,fdt files here
+    String[] files = primaryDir.listAll();
+    assertTrue(files.length > 0);
+    for (int x=0; x < files.length; x++) {
+      String ext = FileSwitchDirectory.getExtension(files[x]);
+      assertTrue(fileExtensions.contains(ext));
+    }
+    files = secondaryDir.listAll();
+    assertTrue(files.length > 0);
+    // we should not see fdx,fdt files here
+    for (int x=0; x < files.length; x++) {
+      String ext = FileSwitchDirectory.getExtension(files[x]);
+      assertFalse(fileExtensions.contains(ext));
+    }
+    reader.close();
+    writer.close();
+
+    files = fsd.listAll();
+    for(int i=0;i<files.length;i++) {
+      assertNotNull(files[i]);
+    }
+    fsd.close();
+  }
+  
+  private Directory newFSSwitchDirectory(Set<String> primaryExtensions) throws IOException {
+    Directory a = new SimpleFSDirectory(_TestUtil.getTempDir("foo"));
+    Directory b = new SimpleFSDirectory(_TestUtil.getTempDir("bar"));
+    FileSwitchDirectory switchDir = new FileSwitchDirectory(primaryExtensions, a, b, true);
+    return new MockDirectoryWrapper(random, switchDir);
+  }
+  
+  // LUCENE-3380 -- make sure we get exception if the directory really does not exist.
+  public void testNoDir() throws Throwable {
+    Directory dir = newFSSwitchDirectory(Collections.<String>emptySet());
+    try {
+      IndexReader.open(dir, true);
+      fail("did not hit expected exception");
+    } catch (NoSuchDirectoryException nsde) {
+      // expected
+    }
+    dir.close();
+  }
+  
+  // LUCENE-3380 test that we can add a file, and then when we call list() we get it back
+  public void testDirectoryFilter() throws IOException {
+    Directory dir = newFSSwitchDirectory(Collections.<String>emptySet());
+    String name = "file";
+    try {
+      dir.createOutput(name).close();
+      assertTrue(dir.fileExists(name));
+      assertTrue(Arrays.asList(dir.listAll()).contains(name));
+    } finally {
+      dir.close();
+    }
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/store/TestHugeRamFile.java b/lucene/backwards/src/test/org/apache/lucene/store/TestHugeRamFile.java
new file mode 100755
index 0000000..3530fdd
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/store/TestHugeRamFile.java
@@ -0,0 +1,102 @@
+package org.apache.lucene.store;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.HashMap;
+
+import org.apache.lucene.util.LuceneTestCase;
+
+/** Test huge RAMFile with more than Integer.MAX_VALUE bytes. */
+public class TestHugeRamFile extends LuceneTestCase {
+  
+  private static final long MAX_VALUE = (long) 2 * (long) Integer.MAX_VALUE;
+
+  /** Fake a huge ram file by using the same byte buffer for all 
+   * buffers under maxint. */
+  private static class DenseRAMFile extends RAMFile {
+    private long capacity = 0;
+    private HashMap<Integer,byte[]> singleBuffers = new HashMap<Integer,byte[]>();
+    @Override
+    protected byte[] newBuffer(int size) {
+      capacity += size;
+      if (capacity <= MAX_VALUE) {
+        // below maxint we reuse buffers
+        byte buf[] = singleBuffers.get(Integer.valueOf(size));
+        if (buf==null) {
+          buf = new byte[size]; 
+          //System.out.println("allocate: "+size);
+          singleBuffers.put(Integer.valueOf(size),buf);
+        }
+        return buf;
+      }
+      //System.out.println("allocate: "+size); System.out.flush();
+      return new byte[size];
+    }
+  }
+  
+  /** Test huge RAMFile with more than Integer.MAX_VALUE bytes. (LUCENE-957) */
+  public void testHugeFile() throws IOException {
+    DenseRAMFile f = new DenseRAMFile();
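+    // DenseRAMFile recycles one backing array per buffer size (up to MAX_VALUE),
+    // so the logical file can grow past 2 * Integer.MAX_VALUE bytes without
+    // allocating anywhere near that much heap.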
+    // output part
+    RAMOutputStream out = new RAMOutputStream(f);
+    byte b1[] = new byte[RAMOutputStream.BUFFER_SIZE];
+    byte b2[] = new byte[RAMOutputStream.BUFFER_SIZE / 3];
+    for (int i = 0; i < b1.length; i++) {
+      b1[i] = (byte) (i & 0x0007F);
+    }
+    for (int i = 0; i < b2.length; i++) {
+      b2[i] = (byte) (i & 0x0003F);
+    }
+    long n = 0;
+    assertEquals("output length must match",n,out.length());
+    while (n <= MAX_VALUE - b1.length) {
+      out.writeBytes(b1,0,b1.length);
+      out.flush();
+      n += b1.length;
+      assertEquals("output length must match",n,out.length());
+    }
+    //System.out.println("after writing b1's, length = "+out.length()+" (MAX_VALUE="+MAX_VALUE+")");
+    int m = b2.length;
+    long L = 12;
+    for (int j=0; j<L; j++) {
+      for (int i = 0; i < b2.length; i++) {
+        b2[i]++;
+      }
+      out.writeBytes(b2,0,m);
+      out.flush();
+      n += m;
+      assertEquals("output length must match",n,out.length());
+    }
+    out.close();
+    // input part
+    RAMInputStream in = new RAMInputStream(f);
+    assertEquals("input length must match",n,in.length());
+    //System.out.println("input length = "+in.length()+" % 1024 = "+in.length()%1024);
+    for (int j=0; j<L; j++) {
+      long loc = n - (L-j)*m; 
+      in.seek(loc/3);
+      in.seek(loc);
+      for (int i=0; i<m; i++) {
+        byte bt = in.readByte();
+        byte expected = (byte) (1 + j + (i & 0x0003F));
+        assertEquals("must read same value that was written! j="+j+" i="+i,expected,bt);
+      }
+    }
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/store/TestLock.java b/lucene/backwards/src/test/org/apache/lucene/store/TestLock.java
new file mode 100644
index 0000000..4c83ba5
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/store/TestLock.java
@@ -0,0 +1,55 @@
+package org.apache.lucene.store;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+import java.io.IOException;
+import org.apache.lucene.util.LuceneTestCase;
+
+public class TestLock extends LuceneTestCase {
+
+    public void testObtain() {
+        LockMock lock = new LockMock();
+        Lock.LOCK_POLL_INTERVAL = 10;
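+        // With the timeout equal to the poll interval, obtain() is expected to
+        // try once immediately and once more after a single sleep, hence the
+        // two attempts asserted below.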
+
+        try {
+            lock.obtain(Lock.LOCK_POLL_INTERVAL);
+            fail("Should have failed to obtain lock");
+        } catch (IOException e) {
+            assertEquals("should attempt to lock more than once", lock.lockAttempts, 2);
+        }
+    }
+
+    private class LockMock extends Lock {
+        public int lockAttempts;
+
+        @Override
+        public boolean obtain() {
+            lockAttempts++;
+            return false;
+        }
+        @Override
+        public void release() {
+            // do nothing
+        }
+        @Override
+        public boolean isLocked() {
+            return false;
+        }
+    }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/store/TestLockFactory.java b/lucene/backwards/src/test/org/apache/lucene/store/TestLockFactory.java
new file mode 100755
index 0000000..1253b42
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/store/TestLockFactory.java
@@ -0,0 +1,420 @@
+package org.apache.lucene.store;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util._TestUtil;
+
+public class TestLockFactory extends LuceneTestCase {
+
+    // Verify: we can provide our own LockFactory implementation, the right
+    // methods are called at the right time, locks are created, etc.
+
+    public void testCustomLockFactory() throws IOException {
+        Directory dir = new MockDirectoryWrapper(random, new RAMDirectory());
+        MockLockFactory lf = new MockLockFactory();
+        dir.setLockFactory(lf);
+
+        // Lock prefix should have been set:
+        assertTrue("lock prefix was not set by the RAMDirectory", lf.lockPrefixSet);
+
+        IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+
+        // add 100 documents (so that commit lock is used)
+        for (int i = 0; i < 100; i++) {
+            addDoc(writer);
+        }
+
+        // Both write lock and commit lock should have been created:
+        assertEquals("# of unique locks created (after instantiating IndexWriter)",
+                     1, lf.locksCreated.size());
+        assertTrue("# calls to makeLock is 0 (after instantiating IndexWriter)",
+                   lf.makeLockCount >= 1);
+        
+        for(final String lockName : lf.locksCreated.keySet()) {
+            MockLockFactory.MockLock lock = (MockLockFactory.MockLock) lf.locksCreated.get(lockName);
+            assertTrue("# calls to Lock.obtain is 0 (after instantiating IndexWriter)",
+                       lock.lockAttempts > 0);
+        }
+        
+        writer.close();
+    }
+
+    // Verify: we can use the NoLockFactory with RAMDirectory w/ no
+    // exceptions raised:
+    // Verify: NoLockFactory allows two IndexWriters
+    public void testRAMDirectoryNoLocking() throws IOException {
+        Directory dir = new MockDirectoryWrapper(random, new RAMDirectory());
+        dir.setLockFactory(NoLockFactory.getNoLockFactory());
+
+        assertTrue("RAMDirectory.setLockFactory did not take",
+                   NoLockFactory.class.isInstance(dir.getLockFactory()));
+
+        IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+        writer.commit(); // required so the second open succeeds
+        // Create a 2nd IndexWriter.  This is normally not allowed but it should run through since we're not
+        // using any locks:
+        IndexWriter writer2 = null;
+        try {
+            writer2 = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
+        } catch (Exception e) {
+            e.printStackTrace(System.out);
+            fail("Should not have hit an IOException with no locking");
+        }
+
+        writer.close();
+        if (writer2 != null) {
+            writer2.close();
+        }
+    }
+
+    // Verify: SingleInstanceLockFactory is the default lock for RAMDirectory
+    // Verify: RAMDirectory does basic locking correctly (can't create two IndexWriters)
+    public void testDefaultRAMDirectory() throws IOException {
+        Directory dir = new RAMDirectory();
+
+        assertTrue("RAMDirectory did not use correct LockFactory: got " + dir.getLockFactory(),
+                   SingleInstanceLockFactory.class.isInstance(dir.getLockFactory()));
+
+        IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+
+        // Create a 2nd IndexWriter.  This should fail:
+        IndexWriter writer2 = null;
+        try {
+            writer2 = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
+            fail("Should have hit an IOException with two IndexWriters on default SingleInstanceLockFactory");
+        } catch (IOException e) {
+        }
+
+        writer.close();
+        if (writer2 != null) {
+            writer2.close();
+        }
+    }
+    
+    public void testSimpleFSLockFactory() throws IOException {
+      // test string file instantiation
+      new SimpleFSLockFactory("test");
+    }
+
+    // Verify: do stress test, by opening IndexReaders and
+    // IndexWriters over & over in 2 threads and making sure
+    // no unexpected exceptions are raised:
+    public void testStressLocks() throws Exception {
+      _testStressLocks(null, _TestUtil.getTempDir("index.TestLockFactory6"));
+    }
+
+    // Verify: do stress test, by opening IndexReaders and
+    // IndexWriters over & over in 2 threads and making sure
+    // no unexpected exceptions are raised, but use
+    // NativeFSLockFactory:
+    public void testStressLocksNativeFSLockFactory() throws Exception {
+      File dir = _TestUtil.getTempDir("index.TestLockFactory7");
+      _testStressLocks(new NativeFSLockFactory(dir), dir);
+    }
+
+    public void _testStressLocks(LockFactory lockFactory, File indexDir) throws Exception {
+      Directory dir = newFSDirectory(indexDir, lockFactory);
+
+        // First create a 1 doc index:
+        IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE));
+        addDoc(w);
+        w.close();
+
+      WriterThread writer = new WriterThread(100, dir);
+      SearcherThread searcher = new SearcherThread(100, dir);
+      writer.start();
+      searcher.start();
+
+      while(writer.isAlive() || searcher.isAlive()) {
+        Thread.sleep(1000);
+      }
+
+      assertTrue("IndexWriter hit unexpected exceptions", !writer.hitException);
+      assertTrue("IndexSearcher hit unexpected exceptions", !searcher.hitException);
+
+      dir.close();
+      // Cleanup
+      _TestUtil.rmDir(indexDir);
+    }
+
+    // Verify: NativeFSLockFactory works correctly
+    public void testNativeFSLockFactory() throws IOException {
+
+      NativeFSLockFactory f = new NativeFSLockFactory(TEMP_DIR);
+
+      f.setLockPrefix("test");
+      Lock l = f.makeLock("commit");
+      Lock l2 = f.makeLock("commit");
+
+      assertTrue("failed to obtain lock", l.obtain());
+      assertTrue("succeeded in obtaining lock twice", !l2.obtain());
+      l.release();
+
+      assertTrue("failed to obtain 2nd lock after first one was freed", l2.obtain());
+      l2.release();
+
+      // Make sure we can obtain first one again, test isLocked():
+      assertTrue("failed to obtain lock", l.obtain());
+      assertTrue(l.isLocked());
+      assertTrue(l2.isLocked());
+      l.release();
+      assertFalse(l.isLocked());
+      assertFalse(l2.isLocked());
+    }
+
+    
+    // Verify: NativeFSLockFactory works correctly if the lock file exists
+    public void testNativeFSLockFactoryLockExists() throws IOException {
+      
+      File lockFile = new File(TEMP_DIR, "test.lock");
+      lockFile.createNewFile();
+      
+      Lock l = new NativeFSLockFactory(TEMP_DIR).makeLock("test.lock");
+      assertTrue("failed to obtain lock", l.obtain());
+      l.release();
+      assertFalse("failed to release lock", l.isLocked());
+      if (lockFile.exists()) {
+        lockFile.delete();
+      }
+    }
+
+    public void testNativeFSLockReleaseByOtherLock() throws IOException {
+
+      NativeFSLockFactory f = new NativeFSLockFactory(TEMP_DIR);
+
+      f.setLockPrefix("test");
+      Lock l = f.makeLock("commit");
+      Lock l2 = f.makeLock("commit");
+
+      assertTrue("failed to obtain lock", l.obtain());
+      try {
+        assertTrue(l2.isLocked());
+        l2.release();
+        fail("should not have reached here. LockReleaseFailedException should have been thrown");
+      } catch (LockReleaseFailedException e) {
+        // expected
+      } finally {
+        l.release();
+      }
+    }
+
+    // Verify: NativeFSLockFactory assigns a null lockPrefix if the lockDir is the index directory itself
+    public void testNativeFSLockFactoryPrefix() throws IOException {
+      File fdir1 = _TestUtil.getTempDir("TestLockFactory.8");
+      File fdir2 = _TestUtil.getTempDir("TestLockFactory.8.Lockdir");
+      Directory dir1 = newFSDirectory(fdir1, new NativeFSLockFactory(fdir1));
+      // same directory, but locks are stored somewhere else; the lock factory's prefix should therefore not be null
+      Directory dir2 = newFSDirectory(fdir1, new NativeFSLockFactory(fdir2));
+      
+      String prefix1 = dir1.getLockFactory().getLockPrefix();
+      assertNull("Lock prefix for lockDir same as directory should be null", prefix1);
+      
+      String prefix2 = dir2.getLockFactory().getLockPrefix();
+      assertNotNull("Lock prefix for lockDir outside of directory should be not null", prefix2);
+      
+      dir1.close();
+      dir2.close();
+      
+      _TestUtil.rmDir(fdir1);
+      _TestUtil.rmDir(fdir2);
+    }
+
+    // Verify: default LockFactory has no prefix (ie
+    // write.lock is stored in index):
+    public void testDefaultFSLockFactoryPrefix() throws IOException {
+      // Make sure we get a null prefix, which won't happen if setLockFactory is ever called.
+      File dirName = _TestUtil.getTempDir("TestLockFactory.10");
+
+      Directory dir = new SimpleFSDirectory(dirName);
+      assertNull("Default lock prefix should be null", dir.getLockFactory().getLockPrefix());
+      dir.close();
+      
+      dir = new MMapDirectory(dirName);
+      assertNull("Default lock prefix should be null", dir.getLockFactory().getLockPrefix());
+      dir.close();
+      
+      dir = new NIOFSDirectory(dirName);
+      assertNull("Default lock prefix should be null", dir.getLockFactory().getLockPrefix());
+      dir.close();
+ 
+      _TestUtil.rmDir(dirName);
+    }
+
+    private class WriterThread extends Thread { 
+        private Directory dir;
+        private int numIteration;
+        public boolean hitException = false;
+        public WriterThread(int numIteration, Directory dir) {
+            this.numIteration = numIteration;
+            this.dir = dir;
+        }
+        @Override
+        public void run() {
+            IndexWriter writer = null;
+            for(int i=0;i<this.numIteration;i++) {
+                try {
+                    writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
+                } catch (IOException e) {
+                    if (e.toString().indexOf(" timed out:") == -1) {
+                        hitException = true;
+                        System.out.println("Stress Test Index Writer: creation hit unexpected IOException: " + e.toString());
+                        e.printStackTrace(System.out);
+                    } else {
+                        // lock obtain timed out
+                        // NOTE: we should at some point
+                        // consider this a failure?  The lock
+                        // obtains, across IndexReader &
+                        // IndexWriters should be "fair" (ie
+                        // FIFO).
+                    }
+                } catch (Exception e) {
+                    hitException = true;
+                    System.out.println("Stress Test Index Writer: creation hit unexpected exception: " + e.toString());
+                    e.printStackTrace(System.out);
+                    break;
+                }
+                if (writer != null) {
+                    try {
+                        addDoc(writer);
+                    } catch (IOException e) {
+                        hitException = true;
+                        System.out.println("Stress Test Index Writer: addDoc hit unexpected exception: " + e.toString());
+                        e.printStackTrace(System.out);
+                        break;
+                    }
+                    try {
+                        writer.close();
+                    } catch (IOException e) {
+                        hitException = true;
+                        System.out.println("Stress Test Index Writer: close hit unexpected exception: " + e.toString());
+                        e.printStackTrace(System.out);
+                        break;
+                    }
+                    writer = null;
+                }
+            }
+        }
+    }
+
+    private class SearcherThread extends Thread { 
+        private Directory dir;
+        private int numIteration;
+        public boolean hitException = false;
+        public SearcherThread(int numIteration, Directory dir) {
+            this.numIteration = numIteration;
+            this.dir = dir;
+        }
+        @Override
+        public void run() {
+            IndexSearcher searcher = null;
+            Query query = new TermQuery(new Term("content", "aaa"));
+            for(int i=0;i<this.numIteration;i++) {
+                try{
+                    searcher = new IndexSearcher(dir, false);
+                } catch (Exception e) {
+                    hitException = true;
+                    System.out.println("Stress Test Index Searcher: create hit unexpected exception: " + e.toString());
+                    e.printStackTrace(System.out);
+                    break;
+                }
+                try {
+                  searcher.search(query, null, 1000);
+                } catch (IOException e) {
+                  hitException = true;
+                  System.out.println("Stress Test Index Searcher: search hit unexpected exception: " + e.toString());
+                  e.printStackTrace(System.out);
+                  break;
+                }
+                // System.out.println(hits.length() + " total results");
+                try {
+                  searcher.close();
+                } catch (IOException e) {
+                  hitException = true;
+                  System.out.println("Stress Test Index Searcher: close hit unexpected exception: " + e.toString());
+                  e.printStackTrace(System.out);
+                  break;
+                }
+            }
+        }
+    }
+
+    public class MockLockFactory extends LockFactory {
+
+        public boolean lockPrefixSet;
+        public Map<String,Lock> locksCreated = Collections.synchronizedMap(new HashMap<String,Lock>());
+        public int makeLockCount = 0;
+
+        @Override
+        public void setLockPrefix(String lockPrefix) {    
+            super.setLockPrefix(lockPrefix);
+            lockPrefixSet = true;
+        }
+
+        @Override
+        synchronized public Lock makeLock(String lockName) {
+            Lock lock = new MockLock();
+            locksCreated.put(lockName, lock);
+            makeLockCount++;
+            return lock;
+        }
+
+        @Override
+        public void clearLock(String specificLockName) {}
+
+        public class MockLock extends Lock {
+            public int lockAttempts;
+
+            @Override
+            public boolean obtain() {
+                lockAttempts++;
+                return true;
+            }
+            @Override
+            public void release() {
+                // do nothing
+            }
+            @Override
+            public boolean isLocked() {
+                return false;
+            }
+        }
+    }
+
+    private void addDoc(IndexWriter writer) throws IOException {
+        Document doc = new Document();
+        doc.add(newField("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
+        writer.addDocument(doc);
+    }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/store/TestMultiMMap.java b/lucene/backwards/src/test/org/apache/lucene/store/TestMultiMMap.java
new file mode 100644
index 0000000..7669bb1
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/store/TestMultiMMap.java
@@ -0,0 +1,148 @@
+package org.apache.lucene.store;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.File;
+import java.util.Random;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util._TestUtil;
+
+/**
+ * Tests MMapDirectory's MultiMMapIndexInput
+ * <p>
+ * Because Java's ByteBuffer uses an int to address the
+ * values, it's necessary to access a file >
+ * Integer.MAX_VALUE in size using multiple byte buffers.
+ */
+public class TestMultiMMap extends LuceneTestCase {
+  File workDir;
+  
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    assumeTrue("test requires a jre that supports unmapping", MMapDirectory.UNMAP_SUPPORTED);
+    workDir = _TestUtil.getTempDir("TestMultiMMap");
+    workDir.mkdirs();
+  }
+
+  public void testSeekZero() throws Exception {
+    for (int i = 0; i < 31; i++) {
+      MMapDirectory mmapDir = new MMapDirectory(_TestUtil.getTempDir("testSeekZero"));
+      mmapDir.setMaxChunkSize(1<<i);
+      IndexOutput io = mmapDir.createOutput("zeroBytes");
+      io.close();
+      IndexInput ii = mmapDir.openInput("zeroBytes");
+      ii.seek(0L);
+      ii.close();
+      mmapDir.close();
+    }
+  }
+  
+  public void testSeekEnd() throws Exception {
+    for (int i = 0; i < 17; i++) {
+      MMapDirectory mmapDir = new MMapDirectory(_TestUtil.getTempDir("testSeekEnd"));
+      mmapDir.setMaxChunkSize(1<<i);
+      IndexOutput io = mmapDir.createOutput("bytes");
+      byte bytes[] = new byte[1<<i];
+      random.nextBytes(bytes);
+      io.writeBytes(bytes, bytes.length);
+      io.close();
+      IndexInput ii = mmapDir.openInput("bytes");
+      byte actual[] = new byte[1<<i];
+      ii.readBytes(actual, 0, actual.length);
+      assertEquals(new BytesRef(bytes), new BytesRef(actual));
+      ii.seek(1<<i);
+      ii.close();
+      mmapDir.close();
+    }
+  }
+  
+  public void testSeeking() throws Exception {
+    for (int i = 0; i < 10; i++) {
+      MMapDirectory mmapDir = new MMapDirectory(_TestUtil.getTempDir("testSeeking"));
+      mmapDir.setMaxChunkSize(1<<i);
+      IndexOutput io = mmapDir.createOutput("bytes");
+      byte bytes[] = new byte[1<<(i+1)]; // make sure we switch buffers
+      random.nextBytes(bytes);
+      io.writeBytes(bytes, bytes.length);
+      io.close();
+      IndexInput ii = mmapDir.openInput("bytes");
+      byte actual[] = new byte[1<<(i+1)]; // first read all bytes
+      ii.readBytes(actual, 0, actual.length);
+      assertEquals(new BytesRef(bytes), new BytesRef(actual));
+      for (int sliceStart = 0; sliceStart < bytes.length; sliceStart++) {
+        for (int sliceLength = 0; sliceLength < bytes.length - sliceStart; sliceLength++) {
+          byte slice[] = new byte[sliceLength];
+          ii.seek(sliceStart);
+          ii.readBytes(slice, 0, slice.length);
+          assertEquals(new BytesRef(bytes, sliceStart, sliceLength), new BytesRef(slice));
+        }
+      }
+      ii.close();
+      mmapDir.close();
+    }
+  }
+  
+  public void testRandomChunkSizes() throws Exception {
+    int num = atLeast(10);
+    for (int i = 0; i < num; i++)
+      assertChunking(random, _TestUtil.nextInt(random, 20, 100));
+  }
+  
+  private void assertChunking(Random random, int chunkSize) throws Exception {
+    File path = _TestUtil.createTempFile("mmap" + chunkSize, "tmp", workDir);
+    path.delete();
+    path.mkdirs();
+    MMapDirectory mmapDir = new MMapDirectory(path);
+    mmapDir.setMaxChunkSize(chunkSize);
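+    // A tiny max chunk size forces even small files to be split across several
+    // mapped buffers, exercising MultiMMapIndexInput's buffer-boundary handling
+    // without needing a file larger than Integer.MAX_VALUE bytes.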
+    // we will map a lot, try to turn on the unmap hack
+    if (MMapDirectory.UNMAP_SUPPORTED)
+      mmapDir.setUseUnmap(true);
+    MockDirectoryWrapper dir = new MockDirectoryWrapper(random, mmapDir);
+    RandomIndexWriter writer = new RandomIndexWriter(random, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
+    Document doc = new Document();
+    Field docid = newField("docid", "0", Field.Store.YES, Field.Index.NOT_ANALYZED);
+    Field junk = newField("junk", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
+    doc.add(docid);
+    doc.add(junk);
+    
+    int numDocs = 100;
+    for (int i = 0; i < numDocs; i++) {
+      docid.setValue("" + i);
+      junk.setValue(_TestUtil.randomUnicodeString(random));
+      writer.addDocument(doc);
+    }
+    IndexReader reader = writer.getReader();
+    writer.close();
+    
+    int numAsserts = atLeast(100);
+    for (int i = 0; i < numAsserts; i++) {
+      int docID = random.nextInt(numDocs);
+      assertEquals("" + docID, reader.document(docID).get("docid"));
+    }
+    reader.close();
+    dir.close();
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/store/TestRAMDirectory.java b/lucene/backwards/src/test/org/apache/lucene/store/TestRAMDirectory.java
new file mode 100644
index 0000000..b0f599c
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/store/TestRAMDirectory.java
@@ -0,0 +1,203 @@
+package org.apache.lucene.store;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.File;
+import java.io.IOException;
+import java.io.ObjectOutput;
+import java.io.ObjectOutputStream;
+import java.io.ByteArrayOutputStream;
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util._TestUtil;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.WhitespaceAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.util.English;
+
+/**
+ * JUnit test case for RAMDirectory. RAMDirectory itself is used in many test cases,
+ * but none of them uses a constructor other than the default one.
+ */
+public class TestRAMDirectory extends LuceneTestCase {
+  
+  private File indexDir = null;
+  
+  // add enough documents so that the index will be larger than RAMDirectory.READ_BUFFER_SIZE
+  private final int docsToAdd = 500;
+  
+  // setup the index
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    indexDir = _TestUtil.getTempDir("RAMDirIndex");
+    
+    Directory dir = newFSDirectory(indexDir);
+    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
+        TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE));
+    // add some documents
+    Document doc = null;
+    for (int i = 0; i < docsToAdd; i++) {
+      doc = new Document();
+      doc.add(newField("content", English.intToEnglish(i).trim(), Field.Store.YES, Field.Index.NOT_ANALYZED));
+      writer.addDocument(doc);
+    }
+    assertEquals(docsToAdd, writer.maxDoc());
+    writer.close();
+    dir.close();
+  }
+  
+  public void testRAMDirectory () throws IOException {
+    
+    Directory dir = newFSDirectory(indexDir);
+    MockDirectoryWrapper ramDir = new MockDirectoryWrapper(random, new RAMDirectory(dir));
+    
+    // close the underlying directory
+    dir.close();
+    
+    // Check size
+    assertEquals(ramDir.sizeInBytes(), ramDir.getRecomputedSizeInBytes());
+    
+    // open reader to test document count
+    IndexReader reader = IndexReader.open(ramDir, true);
+    assertEquals(docsToAdd, reader.numDocs());
+    
+    // open a searcher to check that all docs are there
+    IndexSearcher searcher = newSearcher(reader);
+    
+    // search for all documents
+    for (int i = 0; i < docsToAdd; i++) {
+      Document doc = searcher.doc(i);
+      assertTrue(doc.getField("content") != null);
+    }
+
+    // cleanup
+    reader.close();
+    searcher.close();
+  }
+  
+  private final int numThreads = 10;
+  private final int docsPerThread = 40;
+  
+  public void testRAMDirectorySize() throws IOException, InterruptedException {
+      
+    Directory dir = newFSDirectory(indexDir);
+    final MockDirectoryWrapper ramDir = new MockDirectoryWrapper(random, new RAMDirectory(dir));
+    dir.close();
+    
+    final IndexWriter writer = new IndexWriter(ramDir, new IndexWriterConfig(
+        TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
+    writer.optimize();
+    
+    assertEquals(ramDir.sizeInBytes(), ramDir.getRecomputedSizeInBytes());
+    
+    Thread[] threads = new Thread[numThreads];
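+    // Add documents concurrently from several threads; the directory's tracked
+    // size must still match a full recount afterwards.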
+    for (int i=0; i<numThreads; i++) {
+      final int num = i;
+      threads[i] = new Thread(){
+        @Override
+        public void run() {
+          for (int j=1; j<docsPerThread; j++) {
+            Document doc = new Document();
+            doc.add(newField("sizeContent", English.intToEnglish(num*docsPerThread+j).trim(), Field.Store.YES, Field.Index.NOT_ANALYZED));
+            try {
+              writer.addDocument(doc);
+            } catch (IOException e) {
+              throw new RuntimeException(e);
+            }
+          }
+        }
+      };
+    }
+    for (int i=0; i<numThreads; i++)
+      threads[i].start();
+    for (int i=0; i<numThreads; i++)
+      threads[i].join();
+
+    writer.optimize();
+    assertEquals(ramDir.sizeInBytes(), ramDir.getRecomputedSizeInBytes());
+    
+    writer.close();
+  }
+
+
+  public void testSerializable() throws IOException {
+    Directory dir = new RAMDirectory();
+    ByteArrayOutputStream bos = new ByteArrayOutputStream(1024);
+    assertEquals("initially empty", 0, bos.size());
+    ObjectOutput out = new ObjectOutputStream(bos);
+    int headerSize = bos.size();
+    out.writeObject(dir);
+    out.close();
+    assertTrue("contains more then just header", headerSize < bos.size());
+  } 
+
+  @Override
+  public void tearDown() throws Exception {
+    // cleanup 
+    if (indexDir != null && indexDir.exists()) {
+      rmDir (indexDir);
+    }
+    super.tearDown();
+  }
+
+  // LUCENE-1196
+  public void testIllegalEOF() throws Exception {
+    RAMDirectory dir = new RAMDirectory();
+    IndexOutput o = dir.createOutput("out");
+    byte[] b = new byte[1024];
+    o.writeBytes(b, 0, 1024);
+    o.close();
+    IndexInput i = dir.openInput("out");
+    i.seek(1024);
+    i.close();
+    dir.close();
+  }
+  
+  private void rmDir(File dir) {
+    File[] files = dir.listFiles();
+    for (int i = 0; i < files.length; i++) {
+      files[i].delete();
+    }
+    dir.delete();
+  }
+
+  // LUCENE-2852
+  public void testSeekToEOFThenBack() throws Exception {
+    RAMDirectory dir = new RAMDirectory();
+
+    IndexOutput o = dir.createOutput("out");
+    byte[] bytes = new byte[3*RAMInputStream.BUFFER_SIZE];
+    o.writeBytes(bytes, 0, bytes.length);
+    o.close();
+
+    IndexInput i = dir.openInput("out");
+    i.seek(2*RAMInputStream.BUFFER_SIZE-1);
+    i.seek(3*RAMInputStream.BUFFER_SIZE);
+    i.seek(RAMInputStream.BUFFER_SIZE);
+    i.readBytes(bytes, 0, 2*RAMInputStream.BUFFER_SIZE);
+    i.close();
+    dir.close();
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/store/TestWindowsMMap.java b/lucene/backwards/src/test/org/apache/lucene/store/TestWindowsMMap.java
new file mode 100644
index 0000000..6ecda82
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/store/TestWindowsMMap.java
@@ -0,0 +1,107 @@
+package org.apache.lucene.store;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Collections;
+import java.io.File;
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util._TestUtil;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.search.IndexSearcher;
+
+public class TestWindowsMMap extends LuceneTestCase {
+  
+  private final static String alphabet = "abcdefghijklmnopqrstuvwzyz";
+  
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+  }
+  
+  private String randomToken() {
+    int tl = 1 + random.nextInt(7);
+    StringBuilder sb = new StringBuilder();
+    for(int cx = 0; cx < tl; cx ++) {
+      int c = random.nextInt(25);
+      sb.append(alphabet.substring(c, c+1));
+    }
+    return sb.toString();
+  }
+  
+  private String randomField() {
+    int fl = 1 + random.nextInt(3);
+    StringBuilder fb = new StringBuilder();
+    for(int fx = 0; fx < fl; fx ++) {
+      fb.append(randomToken());
+      fb.append(" ");
+    }
+    return fb.toString();
+  }
+  
+  private final static String storePathname = 
+   _TestUtil.getTempDir("testLuceneMmap").getAbsolutePath();
+
+  public void testMmapIndex() throws Exception {
+    // sometimes the directory is not cleaned by rmDir, because on Windows it
+    // may take some time until the files are finally dereferenced. So clean the
+    // directory up front; otherwise the new IndexWriter will fail.
+    File dirPath = new File(storePathname);
+    rmDir(dirPath);
+    MMapDirectory dir = new MMapDirectory(dirPath, null);
+    
+    // plan to add a set of useful stopwords, consider changing some of the
+    // interior filters.
+    MockAnalyzer analyzer = new MockAnalyzer(random);
+    // TODO: something about lock timeouts and leftover locks.
+    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
+        TEST_VERSION_CURRENT, analyzer)
+        .setOpenMode(OpenMode.CREATE));
+    writer.commit();
+    IndexSearcher searcher = new IndexSearcher(dir, true);
+    
+    int num = atLeast(1000);
+    for (int dx = 0; dx < num; dx++) {
+      String f = randomField();
+      Document doc = new Document();
+      doc.add(newField("data", f, Field.Store.YES, Field.Index.ANALYZED));	
+      writer.addDocument(doc);
+    }
+    
+    searcher.close();
+    writer.close();
+    rmDir(dirPath);
+  }
+
+  private void rmDir(File dir) {
+    if (!dir.exists()) {
+      return;
+    }
+    for (File file : dir.listFiles()) {
+      file.delete();
+    }
+    dir.delete();
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/util/TestArrayUtil.java b/lucene/backwards/src/test/org/apache/lucene/util/TestArrayUtil.java
new file mode 100644
index 0000000..93f583c
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/util/TestArrayUtil.java
@@ -0,0 +1,250 @@
+package org.apache.lucene.util;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Arrays;
+import java.util.Collections;
+
+public class TestArrayUtil extends LuceneTestCase {
+
+  // Ensure ArrayUtil.oversize gives linear amortized cost of realloc/copy
+  public void testGrowth() {
+    int currentSize = 0;
+    long copyCost = 0;
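+    // Each reallocation copies the old array, so summing the previous sizes
+    // gives the total copy work; dividing by the current size yields the
+    // amortized copy cost per element, which stays bounded for geometric growth.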
+
+    // Make sure ArrayUtil hits Integer.MAX_VALUE, if we insist:
+    while(currentSize != Integer.MAX_VALUE) {
+      int nextSize = ArrayUtil.oversize(1+currentSize, RamUsageEstimator.NUM_BYTES_OBJECT_REF);
+      assertTrue(nextSize > currentSize);
+      if (currentSize > 0) {
+        copyCost += currentSize;
+        double copyCostPerElement = ((double) copyCost)/currentSize;
+        assertTrue("cost " + copyCostPerElement, copyCostPerElement < 10.0);
+      }
+      currentSize = nextSize;
+    }
+  }
+
+  public void testMaxSize() {
+    // intentionally pass invalid elemSizes:
+    for(int elemSize=0;elemSize<10;elemSize++) {
+      assertEquals(Integer.MAX_VALUE, ArrayUtil.oversize(Integer.MAX_VALUE, elemSize));
+      assertEquals(Integer.MAX_VALUE, ArrayUtil.oversize(Integer.MAX_VALUE-1, elemSize));
+    }
+  }
+
+  public void testInvalidElementSizes() {
+    int num = atLeast(10000);
+    for (int iter = 0; iter < num; iter++) {
+      final int minTargetSize = random.nextInt(Integer.MAX_VALUE);
+      final int elemSize = random.nextInt(11);
+      final int v = ArrayUtil.oversize(minTargetSize, elemSize);
+      assertTrue(v >= minTargetSize);
+    }
+  }
+
+  public void testParseInt() throws Exception {
+    int test;
+    try {
+      test = ArrayUtil.parseInt("".toCharArray());
+      assertTrue(false);
+    } catch (NumberFormatException e) {
+      //expected
+    }
+    try {
+      test = ArrayUtil.parseInt("foo".toCharArray());
+      assertTrue(false);
+    } catch (NumberFormatException e) {
+      //expected
+    }
+    try {
+      test = ArrayUtil.parseInt(String.valueOf(Long.MAX_VALUE).toCharArray());
+      assertTrue(false);
+    } catch (NumberFormatException e) {
+      //expected
+    }
+    try {
+      test = ArrayUtil.parseInt("0.34".toCharArray());
+      assertTrue(false);
+    } catch (NumberFormatException e) {
+      //expected
+    }
+
+    try {
+      test = ArrayUtil.parseInt("1".toCharArray());
+      assertTrue(test + " does not equal: " + 1, test == 1);
+      test = ArrayUtil.parseInt("-10000".toCharArray());
+      assertTrue(test + " does not equal: " + -10000, test == -10000);
+      test = ArrayUtil.parseInt("1923".toCharArray());
+      assertTrue(test + " does not equal: " + 1923, test == 1923);
+      test = ArrayUtil.parseInt("-1".toCharArray());
+      assertTrue(test + " does not equal: " + -1, test == -1);
+      test = ArrayUtil.parseInt("foo 1923 bar".toCharArray(), 4, 4);
+      assertTrue(test + " does not equal: " + 1923, test == 1923);
+    } catch (NumberFormatException e) {
+      e.printStackTrace();
+      assertTrue(false);
+    }
+
+  }
+
+  
+  private Integer[] createRandomArray(int maxSize) {
+    final Integer[] a = new Integer[random.nextInt(maxSize) + 1];
+    for (int i = 0; i < a.length; i++) {
+      a[i] = Integer.valueOf(random.nextInt(a.length));
+    }
+    return a;
+  }
+  
+  public void testQuickSort() {
+    int num = atLeast(50);
+    for (int i = 0; i < num; i++) {
+      Integer[] a1 = createRandomArray(1000), a2 = a1.clone();
+      ArrayUtil.quickSort(a1);
+      Arrays.sort(a2);
+      assertArrayEquals(a2, a1);
+      
+      a1 = createRandomArray(1000);
+      a2 = a1.clone();
+      ArrayUtil.quickSort(a1, Collections.reverseOrder());
+      Arrays.sort(a2, Collections.reverseOrder());
+      assertArrayEquals(a2, a1);
+      // reverse back, so we can test that completely backwards sorted array (worst case) is working:
+      ArrayUtil.quickSort(a1);
+      Arrays.sort(a2);
+      assertArrayEquals(a2, a1);
+    }
+  }
+  
+  private Integer[] createSparseRandomArray(int maxSize) {
+    final Integer[] a = new Integer[random.nextInt(maxSize) + 1];
+    for (int i = 0; i < a.length; i++) {
+      a[i] = Integer.valueOf(random.nextInt(2));
+    }
+    return a;
+  }
+  
+  // This is a test for LUCENE-3054 (which fails without the merge sort fall back with stack overflow in most cases)
+  public void testQuickToMergeSortFallback() {
+    int num = atLeast(50);
+    for (int i = 0; i < num; i++) {
+      Integer[] a1 = createSparseRandomArray(40000), a2 = a1.clone();
+      ArrayUtil.quickSort(a1);
+      Arrays.sort(a2);
+      assertArrayEquals(a2, a1);
+    }
+  }
+  
+  public void testMergeSort() {
+    int num = atLeast(50);
+    for (int i = 0; i < num; i++) {
+      Integer[] a1 = createRandomArray(1000), a2 = a1.clone();
+      ArrayUtil.mergeSort(a1);
+      Arrays.sort(a2);
+      assertArrayEquals(a2, a1);
+      
+      a1 = createRandomArray(1000);
+      a2 = a1.clone();
+      ArrayUtil.mergeSort(a1, Collections.reverseOrder());
+      Arrays.sort(a2, Collections.reverseOrder());
+      assertArrayEquals(a2, a1);
+      // reverse back, so we can test that completely backwards sorted array (worst case) is working:
+      ArrayUtil.mergeSort(a1);
+      Arrays.sort(a2);
+      assertArrayEquals(a2, a1);
+    }
+  }
+  
+  public void testInsertionSort() {
+    for (int i = 0, c = atLeast(500); i < c; i++) {
+      Integer[] a1 = createRandomArray(30), a2 = a1.clone();
+      ArrayUtil.insertionSort(a1);
+      Arrays.sort(a2);
+      assertArrayEquals(a2, a1);
+      
+      a1 = createRandomArray(30);
+      a2 = a1.clone();
+      ArrayUtil.insertionSort(a1, Collections.reverseOrder());
+      Arrays.sort(a2, Collections.reverseOrder());
+      assertArrayEquals(a2, a1);
+      // reverse back, so we can test that completely backwards sorted array (worst case) is working:
+      ArrayUtil.insertionSort(a1);
+      Arrays.sort(a2);
+      assertArrayEquals(a2, a1);
+    }
+  }
+  
+  static class Item implements Comparable<Item> {
+    final int val, order;
+    
+    Item(int val, int order) {
+      this.val = val;
+      this.order = order;
+    }
+    
+    public int compareTo(Item other) {
+      return this.order - other.order;
+    }
+    
+    @Override
+    public String toString() {
+      return Integer.toString(val);
+    }
+  }
+  
+  public void testMergeSortStability() {
+    Item[] items = new Item[100];
+    for (int i = 0; i < items.length; i++) {
+      // Half of the items have a real value but the same order (0). Their values
+      // are assigned in increasing order, so a stable sort must keep them in that
+      // order. The other half has a defined (positive) order but no real value (-1);
+      // they should end up after all of the items above once sorted.
+      final boolean equal = random.nextBoolean();
+      items[i] = new Item(equal ? (i+1) : -1, equal ? 0 : (random.nextInt(1000)+1));
+    }
+    
+    if (VERBOSE) System.out.println("Before: " + Arrays.toString(items));
+    // if you replace this with ArrayUtil.quickSort(), test should fail:
+    ArrayUtil.mergeSort(items);
+    if (VERBOSE) System.out.println("Sorted: " + Arrays.toString(items));
+    
+    Item last = items[0];
+    for (int i = 1; i < items.length; i++) {
+      final Item act = items[i];
+      if (act.order == 0) {
+        // order of "equal" items should be not mixed up
+        assertTrue(act.val > last.val);
+      }
+      assertTrue(act.order >= last.order);
+      last = act;
+    }
+  }
+  
+  // should produce no exceptions
+  public void testEmptyArraySort() {
+    Integer[] a = new Integer[0];
+    ArrayUtil.quickSort(a);
+    ArrayUtil.mergeSort(a);
+    ArrayUtil.insertionSort(a);
+    ArrayUtil.quickSort(a, Collections.reverseOrder());
+    ArrayUtil.mergeSort(a, Collections.reverseOrder());
+    ArrayUtil.insertionSort(a, Collections.reverseOrder());
+  }
+  
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/util/TestAttributeSource.java b/lucene/backwards/src/test/org/apache/lucene/util/TestAttributeSource.java
new file mode 100644
index 0000000..cd3c580
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/util/TestAttributeSource.java
@@ -0,0 +1,256 @@
+package org.apache.lucene.util;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.analysis.Token;
+import org.apache.lucene.analysis.tokenattributes.*;
+
+import java.util.Iterator;
+import java.util.HashMap;
+import java.util.Map;
+
+public class TestAttributeSource extends LuceneTestCase {
+
+  public void testCaptureState() {
+    // init a first instance
+    AttributeSource src = new AttributeSource();
+    CharTermAttribute termAtt = src.addAttribute(CharTermAttribute.class);
+    TypeAttribute typeAtt = src.addAttribute(TypeAttribute.class);
+    termAtt.append("TestTerm");
+    typeAtt.setType("TestType");
+    final int hashCode = src.hashCode();
+    
+    AttributeSource.State state = src.captureState();
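+    // captureState snapshots the current values of all attributes; restoreState
+    // copies those values back into whatever attributes the target source has.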
+    
+    // modify the attributes
+    termAtt.setEmpty().append("AnotherTestTerm");
+    typeAtt.setType("AnotherTestType");
+    assertTrue("Hash code should be different", hashCode != src.hashCode());
+    
+    src.restoreState(state);
+    assertEquals("TestTerm", termAtt.toString());
+    assertEquals("TestType", typeAtt.type());
+    assertEquals("Hash code should be equal after restore", hashCode, src.hashCode());
+
+    // restore into an exact configured copy
+    AttributeSource copy = new AttributeSource();
+    copy.addAttribute(CharTermAttribute.class);
+    copy.addAttribute(TypeAttribute.class);
+    copy.restoreState(state);
+    assertEquals("Both AttributeSources should have same hashCode after restore", src.hashCode(), copy.hashCode());
+    assertEquals("Both AttributeSources should be equal after restore", src, copy);
+    
+    // init a second instance (with attributes in different order and one additional attribute)
+    AttributeSource src2 = new AttributeSource();
+    typeAtt = src2.addAttribute(TypeAttribute.class);
+    FlagsAttribute flagsAtt = src2.addAttribute(FlagsAttribute.class);
+    termAtt = src2.addAttribute(CharTermAttribute.class);
+    flagsAtt.setFlags(12345);
+
+    src2.restoreState(state);
+    assertEquals("TestTerm", termAtt.toString());
+    assertEquals("TestType", typeAtt.type());
+    assertEquals("FlagsAttribute should not be touched", 12345, flagsAtt.getFlags());
+
+    // init a third instance missing one Attribute
+    AttributeSource src3 = new AttributeSource();
+    termAtt = src3.addAttribute(CharTermAttribute.class);
+    try {
+      src3.restoreState(state);
+      fail("The third instance is missing the TypeAttribute, so restoreState() should throw IllegalArgumentException");
+    } catch (IllegalArgumentException iae) {
+      // pass
+    }
+  }
+  
+  public void testCloneAttributes() {
+    final AttributeSource src = new AttributeSource();
+    final FlagsAttribute flagsAtt = src.addAttribute(FlagsAttribute.class);
+    final TypeAttribute typeAtt = src.addAttribute(TypeAttribute.class);
+    flagsAtt.setFlags(1234);
+    typeAtt.setType("TestType");
+    
+    final AttributeSource clone = src.cloneAttributes();
+    final Iterator<Class<? extends Attribute>> it = clone.getAttributeClassesIterator();
+    assertEquals("FlagsAttribute must be the first attribute", FlagsAttribute.class, it.next());
+    assertEquals("TypeAttribute must be the second attribute", TypeAttribute.class, it.next());
+    assertFalse("No more attributes", it.hasNext());
+    
+    final FlagsAttribute flagsAtt2 = clone.getAttribute(FlagsAttribute.class);
+    final TypeAttribute typeAtt2 = clone.getAttribute(TypeAttribute.class);
+    assertNotSame("FlagsAttribute of original and clone must be different instances", flagsAtt2, flagsAtt);
+    assertNotSame("TypeAttribute of original and clone must be different instances", typeAtt2, typeAtt);
+    assertEquals("FlagsAttribute of original and clone must be equal", flagsAtt2, flagsAtt);
+    assertEquals("TypeAttribute of original and clone must be equal", typeAtt2, typeAtt);
+    
+    // test copy back
+    flagsAtt2.setFlags(4711);
+    typeAtt2.setType("OtherType");
+    clone.copyTo(src);
+    assertEquals("FlagsAttribute of original must now contain updated term", 4711, flagsAtt.getFlags());
+    assertEquals("TypeAttribute of original must now contain updated type", "OtherType", typeAtt.type());
+    // verify again:
+    assertNotSame("FlagsAttribute of original and clone must be different instances", flagsAtt2, flagsAtt);
+    assertNotSame("TypeAttribute of original and clone must be different instances", typeAtt2, typeAtt);
+    assertEquals("FlagsAttribute of original and clone must be equal", flagsAtt2, flagsAtt);
+    assertEquals("TypeAttribute of original and clone must be equal", typeAtt2, typeAtt);
+  }
+  
+  public void testToStringAndMultiAttributeImplementations() {
+    AttributeSource src = new AttributeSource();
+    CharTermAttribute termAtt = src.addAttribute(CharTermAttribute.class);
+    TypeAttribute typeAtt = src.addAttribute(TypeAttribute.class);
+    termAtt.append("TestTerm");
+    typeAtt.setType("TestType");    
+    assertEquals("Attributes should appear in original order", "("+termAtt.toString()+","+typeAtt.toString()+")", src.toString());
+    Iterator<AttributeImpl> it = src.getAttributeImplsIterator();
+    assertTrue("Iterator should have 2 attributes left", it.hasNext());
+    assertSame("First AttributeImpl from iterator should be termAtt", termAtt, it.next());
+    assertTrue("Iterator should have 1 attribute left", it.hasNext());
+    assertSame("Second AttributeImpl from iterator should be typeAtt", typeAtt, it.next());
+    assertFalse("Iterator should have 0 attributes left", it.hasNext());
+
+    src = new AttributeSource();
+    src.addAttributeImpl(new Token());
+    // this should not add a new attribute as Token implements CharTermAttribute, too
+    termAtt = src.addAttribute(CharTermAttribute.class);
+    assertTrue("CharTermAttribute should be implemented by Token", termAtt instanceof Token);
+    // get the Token attribute and check that it is the only one
+    it = src.getAttributeImplsIterator();
+    Token tok = (Token) it.next();
+    assertFalse("There should be only one attribute implementation instance", it.hasNext());
+    
+    termAtt.setEmpty().append("TestTerm");
+    assertEquals("Token should only be printed once", "("+tok.toString()+")", src.toString());
+  }
+  
+  public void testDefaultAttributeFactory() throws Exception {
+    AttributeSource src = new AttributeSource();
+    
+    assertTrue("CharTermAttribute is not implemented by CharTermAttributeImpl",
+      src.addAttribute(CharTermAttribute.class) instanceof CharTermAttributeImpl);
+    assertTrue("OffsetAttribute is not implemented by OffsetAttributeImpl",
+      src.addAttribute(OffsetAttribute.class) instanceof OffsetAttributeImpl);
+    assertTrue("FlagsAttribute is not implemented by FlagsAttributeImpl",
+      src.addAttribute(FlagsAttribute.class) instanceof FlagsAttributeImpl);
+    assertTrue("PayloadAttribute is not implemented by PayloadAttributeImpl",
+      src.addAttribute(PayloadAttribute.class) instanceof PayloadAttributeImpl);
+    assertTrue("PositionIncrementAttribute is not implemented by PositionIncrementAttributeImpl", 
+      src.addAttribute(PositionIncrementAttribute.class) instanceof PositionIncrementAttributeImpl);
+    assertTrue("TypeAttribute is not implemented by TypeAttributeImpl",
+      src.addAttribute(TypeAttribute.class) instanceof TypeAttributeImpl);
+  }
+  
+  @SuppressWarnings("unchecked")
+  public void testInvalidArguments() throws Exception {
+    try {
+      AttributeSource src = new AttributeSource();
+      src.addAttribute(Token.class);
+      fail("Should throw IllegalArgumentException");
+    } catch (IllegalArgumentException iae) {}
+    
+    try {
+      AttributeSource src = new AttributeSource(Token.TOKEN_ATTRIBUTE_FACTORY);
+      src.addAttribute(Token.class);
+      fail("Should throw IllegalArgumentException");
+    } catch (IllegalArgumentException iae) {}
+    
+    try {
+      AttributeSource src = new AttributeSource();
+      // break this by unsafe cast
+      src.addAttribute((Class) Iterator.class);
+      fail("Should throw IllegalArgumentException");
+    } catch (IllegalArgumentException iae) {}
+  }
+  
+  public void testLUCENE_3042() throws Exception {
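+    // src2 is built on top of src1 and shares its attributes, so adding the TypeAttribute
+    // through src2 must also invalidate src1's cached state/hashCode (see LUCENE-3042)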
+    final AttributeSource src1 = new AttributeSource();
+    src1.addAttribute(CharTermAttribute.class).append("foo");
+    int hash1 = src1.hashCode(); // this triggers a cached state
+    final AttributeSource src2 = new AttributeSource(src1);
+    src2.addAttribute(TypeAttribute.class).setType("bar");
+    assertTrue("The hashCode is identical, so the captured state was preserved.", hash1 != src1.hashCode());
+    assertEquals(src2.hashCode(), src1.hashCode());
+  }
+  
+  // this class is included in the external class check, so no assertion errors should occur
+  @Deprecated
+  static class TestAttributeImpl extends AttributeImpl implements FlagsAttribute {
+  
+    private int flags = 0;
+    
+    public int getFlags() { return flags; }
+    public void setFlags(int flags) { this.flags = flags; }
+    
+    @Override
+    public void clear() { flags = 0; }
+    
+    @Override
+    public void copyTo(AttributeImpl target) {
+      FlagsAttribute t = (FlagsAttribute) target;
+      t.setFlags(flags);
+    }
+    
+    @Override
+    public String toString() {
+      return "foo=bar,moo=mae";
+    }
+  
+  }
+  
+  // this class is excluded from the external class check, so calling reflectWith() should trigger an assertion error
+  @Deprecated
+  static class TestAttributeImpl2 extends TestAttributeImpl {}
+  
+  @Deprecated
+  public void testReflectionOfToString() throws Exception {
+    final AttributeSource src = new AttributeSource();
+    final AttributeImpl att = new TestAttributeImpl();
+    src.addAttributeImpl(att);
+    
+    assertSame("FlagsAttribute is not implemented by same instance of TestAttributeImpl",
+      att, src.addAttribute(FlagsAttribute.class));
+    
+    final Map<String,Object> map = new HashMap<String,Object>();
+    final AttributeReflector reflector = new AttributeReflector() {
+      public void reflect(Class<? extends Attribute> attClass, String key, Object value) {
+        assertSame(FlagsAttribute.class, attClass);
+        map.put(key, value);
+      }
+    };
+    att.reflectWith(reflector);
+    assertEquals(2, map.size());
+    assertEquals("bar", map.get("foo"));
+    assertEquals("mae", map.get("moo"));
+    
+    map.clear();
+    src.reflectWith(reflector);
+    assertEquals(2, map.size());
+    assertEquals("bar", map.get("foo"));
+    assertEquals("mae", map.get("moo"));
+    
+    map.clear();
+    try {
+      new TestAttributeImpl2().reflectWith(reflector);
+      fail("TestAttributeImpl2 should fail assertion on toString() parsing");
+    } catch (AssertionError e) {
+      // pass
+    }
+  }
+  
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/util/TestBitUtil.java b/lucene/backwards/src/test/org/apache/lucene/util/TestBitUtil.java
new file mode 100644
index 0000000..67e1b15
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/util/TestBitUtil.java
@@ -0,0 +1,133 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.lucene.util;
+
+public class TestBitUtil extends LuceneTestCase {
+
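+  // straightforward reference implementation of "number of leading zeros": shift left
+  // until the top bit (bit 63) is set, counting the shifts; 0 maps to 64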
+  private static int slowNlz(long x) {
+    if (x == 0L) return 64;
+    int nlz = 0;
+    while ( ((x << nlz) & (1L << 63)) == 0) {
+      nlz++;
+    }
+    return nlz;
+  }
+
+  private void checkNlz(long x) {
+    assertEquals(slowNlz(x), BitUtil.nlz(x));
+    assertEquals(Long.numberOfLeadingZeros(x), BitUtil.nlz(x));
+  }
+  
+  public void testNlz() {
+    checkNlz(0L);
+    checkNlz(1L);
+    checkNlz(-1L);
+    for (int i = 1; i <= 63; i++) {
+      checkNlz(1L << i);
+      checkNlz((1L << i) + (1L << (i>>1)));
+    }
+  }
+
+  public void testBitUtils() {
+    long num = 100000;
+    assertEquals( 5, BitUtil.ntz(num) );
+    assertEquals( 5, BitUtil.ntz2(num) );
+    assertEquals( 5, BitUtil.ntz3(num) );
+    
+    num = 10;
+    assertEquals( 1, BitUtil.ntz(num) );
+    assertEquals( 1, BitUtil.ntz2(num) );
+    assertEquals( 1, BitUtil.ntz3(num) );
+
+    for (int i=0; i<64; i++) {
+      num = 1L << i;
+      assertEquals( i, BitUtil.ntz(num) );
+      assertEquals( i, BitUtil.ntz2(num) );
+      assertEquals( i, BitUtil.ntz3(num) );
+    }
+  }
+
+
+  private long testArg(int shift) {
+    return (1L << shift) + (1L << (shift>>1));
+  }
+  
+  private long nlzBitUtilBasicLoop(int iters) {
+    long sumRes = 0;
+    while (iters-- >= 0) {
+      for (int i = 1; i <= 63; i++) {
+        long a = testArg(i);
+        sumRes += BitUtil.nlz(a);
+        sumRes += BitUtil.nlz(a+1);
+        sumRes += BitUtil.nlz(a-1);
+        sumRes += BitUtil.nlz(a+10);
+        sumRes += BitUtil.nlz(a-10);
+      }
+    }
+    return sumRes;
+  }
+    
+  private long nlzLongBasicLoop(int iters) {
+    long sumRes = 0;
+    while (iters-- >= 0) {
+      for (int i = 1; i <= 63; i++) {
+        long a = testArg(i);
+        sumRes += Long.numberOfLeadingZeros(a);
+        sumRes += Long.numberOfLeadingZeros(a+1);
+        sumRes += Long.numberOfLeadingZeros(a-1);
+        sumRes += Long.numberOfLeadingZeros(a+10);
+        sumRes += Long.numberOfLeadingZeros(a-10);
+      }
+    }
+    return sumRes;
+  }
+
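+  // manual micro-benchmark comparing BitUtil.nlz() with Long.numberOfLeadingZeros();
+  // named "tst..." instead of "test..." so JUnit does not pick it up automatically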
+  public void tstPerfNlz() { // See LUCENE-3197, prefer to use Long.numberOfLeadingZeros() over BitUtil.nlz().
+    final long measureMilliSecs = 2000;
+    final int basicIters = 100000;
+    long startTime;
+    long endTime;
+    long curTime;
+    long dummy = 0; // avoid optimizing away
+
+    dummy = 0;
+    int bitUtilLoops = 0;
+    startTime = System.currentTimeMillis();
+    endTime = startTime + measureMilliSecs;
+    do {
+      dummy += nlzBitUtilBasicLoop(basicIters);
+      bitUtilLoops++;
+      curTime = System.currentTimeMillis();
+    } while (curTime < endTime);
+    int bitUtilPsTime = (int) (1000000000 * (curTime - startTime) / (basicIters * 5 * 63 * (float) bitUtilLoops));
+    System.out.println("BitUtil nlz time: " + bitUtilPsTime + " picosec/call, dummy: " + dummy);
+
+
+    dummy = 0;
+    int longLoops = 0;
+    startTime = System.currentTimeMillis();
+    endTime = startTime + measureMilliSecs;
+    do {
+      dummy += nlzLongBasicLoop(basicIters);
+      longLoops++;
+      curTime = System.currentTimeMillis();
+    } while (curTime < endTime);
+    int longPsTime = (int) (1000000000 * (curTime - startTime) / (basicIters * 5 * 63 * (float) longLoops));
+    System.out.println("Long    nlz time: " + longPsTime + " picosec/call, dummy: " + dummy);
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/util/TestBitVector.java b/lucene/backwards/src/test/org/apache/lucene/util/TestBitVector.java
new file mode 100644
index 0000000..7721940
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/util/TestBitVector.java
@@ -0,0 +1,214 @@
+package org.apache.lucene.util;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.store.MockDirectoryWrapper;
+import org.apache.lucene.store.RAMDirectory;
+
+/**
+ * <code>TestBitVector</code> tests the <code>BitVector</code>, obviously.
+ */
+public class TestBitVector extends LuceneTestCase
+{
+
+    /**
+     * Test the default constructor on BitVectors of various sizes.
+     * @throws Exception
+     */
+    public void testConstructSize() throws Exception {
+        doTestConstructOfSize(8);
+        doTestConstructOfSize(20);
+        doTestConstructOfSize(100);
+        doTestConstructOfSize(1000);
+    }
+
+    private void doTestConstructOfSize(int n) {
+        BitVector bv = new BitVector(n);
+        assertEquals(n,bv.size());
+    }
+
+    /**
+     * Test the get() and set() methods on BitVectors of various sizes.
+     * @throws Exception
+     */
+    public void testGetSet() throws Exception {
+        doTestGetSetVectorOfSize(8);
+        doTestGetSetVectorOfSize(20);
+        doTestGetSetVectorOfSize(100);
+        doTestGetSetVectorOfSize(1000);
+    }
+
+    private void doTestGetSetVectorOfSize(int n) {
+        BitVector bv = new BitVector(n);
+        for(int i=0;i<bv.size();i++) {
+            // ensure a set bit can be retrieved with get()
+            assertFalse(bv.get(i));
+            bv.set(i);
+            assertTrue(bv.get(i));
+        }
+    }
+
+    /**
+     * Test the clear() method on BitVectors of various sizes.
+     * @throws Exception
+     */
+    public void testClear() throws Exception {
+        doTestClearVectorOfSize(8);
+        doTestClearVectorOfSize(20);
+        doTestClearVectorOfSize(100);
+        doTestClearVectorOfSize(1000);
+    }
+
+    private void doTestClearVectorOfSize(int n) {
+        BitVector bv = new BitVector(n);
+        for(int i=0;i<bv.size();i++) {
+            // ensure a set bit is cleared
+            assertFalse(bv.get(i));
+            bv.set(i);
+            assertTrue(bv.get(i));
+            bv.clear(i);
+            assertFalse(bv.get(i));
+        }
+    }
+
+    /**
+     * Test the count() method on BitVectors of various sizes.
+     * @throws Exception
+     */
+    public void testCount() throws Exception {
+        doTestCountVectorOfSize(8);
+        doTestCountVectorOfSize(20);
+        doTestCountVectorOfSize(100);
+        doTestCountVectorOfSize(1000);
+    }
+
+    private void doTestCountVectorOfSize(int n) {
+        BitVector bv = new BitVector(n);
+        // test count when incrementally setting bits
+        for(int i=0;i<bv.size();i++) {
+            assertFalse(bv.get(i));
+            assertEquals(i,bv.count());
+            bv.set(i);
+            assertTrue(bv.get(i));
+            assertEquals(i+1,bv.count());
+        }
+
+        bv = new BitVector(n);
+        // test count when setting then clearing bits
+        for(int i=0;i<bv.size();i++) {
+            assertFalse(bv.get(i));
+            assertEquals(0,bv.count());
+            bv.set(i);
+            assertTrue(bv.get(i));
+            assertEquals(1,bv.count());
+            bv.clear(i);
+            assertFalse(bv.get(i));
+            assertEquals(0,bv.count());
+        }
+    }
+
+    /**
+     * Test writing and construction to/from Directory.
+     * @throws Exception
+     */
+    public void testWriteRead() throws Exception {
+        doTestWriteRead(8);
+        doTestWriteRead(20);
+        doTestWriteRead(100);
+        doTestWriteRead(1000);
+    }
+
+    private void doTestWriteRead(int n) throws Exception {
+        MockDirectoryWrapper d = new MockDirectoryWrapper(random, new RAMDirectory());
+        d.setPreventDoubleWrite(false);
+        BitVector bv = new BitVector(n);
+        // incrementally set bits, checking the count and writing/re-reading the vector each time
+        for(int i=0;i<bv.size();i++) {
+            assertFalse(bv.get(i));
+            assertEquals(i,bv.count());
+            bv.set(i);
+            assertTrue(bv.get(i));
+            assertEquals(i+1,bv.count());
+            bv.write(d, "TESTBV");
+            BitVector compare = new BitVector(d, "TESTBV");
+            // compare bit vectors with bits set incrementally
+            assertTrue(doCompare(bv,compare));
+        }
+    }
+
+    /**
+     * Test r/w when size/count cause switching between bit-set and d-gaps file formats.  
+     */
+    public void testDgaps() throws IOException {
+      doTestDgaps(1,0,1);
+      doTestDgaps(10,0,1);
+      doTestDgaps(100,0,1);
+      doTestDgaps(1000,4,7);
+      doTestDgaps(10000,40,43);
+      doTestDgaps(100000,415,418);
+      doTestDgaps(1000000,3123,3126);
+    }
+    
+    private void doTestDgaps(int size, int count1, int count2) throws IOException {
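+      // write the vector at set-bit counts around the point where the on-disk
+      // representation switches between the plain bit-set and the sparse d-gaps format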
+      MockDirectoryWrapper d = new MockDirectoryWrapper(random, new RAMDirectory());
+      d.setPreventDoubleWrite(false);
+      BitVector bv = new BitVector(size);
+      for (int i=0; i<count1; i++) {
+        bv.set(i);
+        assertEquals(i+1,bv.count());
+      }
+      bv.write(d, "TESTBV");
+      // gradually increase number of set bits
+      for (int i=count1; i<count2; i++) {
+        BitVector bv2 = new BitVector(d, "TESTBV");
+        assertTrue(doCompare(bv,bv2));
+        bv = bv2;
+        bv.set(i);
+        assertEquals(i+1,bv.count());
+        bv.write(d, "TESTBV");
+      }
+      // now start decreasing number of set bits
+      for (int i=count2-1; i>=count1; i--) {
+        BitVector bv2 = new BitVector(d, "TESTBV");
+        assertTrue(doCompare(bv,bv2));
+        bv = bv2;
+        bv.clear(i);
+        assertEquals(i,bv.count());
+        bv.write(d, "TESTBV");
+      }
+    }
+    /**
+     * Compare two BitVectors.
+     * This should really be an equals method on the BitVector itself.
+     * @param bv One bit vector
+     * @param compare The second to compare
+     */
+    private boolean doCompare(BitVector bv, BitVector compare) {
+        boolean equal = true;
+        for(int i=0;i<bv.size();i++) {
+            // bits must be equal
+            if(bv.get(i)!=compare.get(i)) {
+                equal = false;
+                break;
+            }
+        }
+        return equal;
+    }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/util/TestByteBlockPool.java b/lucene/backwards/src/test/org/apache/lucene/util/TestByteBlockPool.java
new file mode 100644
index 0000000..ef12523
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/util/TestByteBlockPool.java
@@ -0,0 +1,67 @@
+package org.apache.lucene.util;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.store.IndexOutput;
+import org.apache.lucene.store.RAMDirectory;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+public class TestByteBlockPool extends LuceneTestCase {
+
+  public void testCopyRefAndWrite() throws IOException {
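+    // copy a batch of random strings into the pool, dump the pool into a directory file,
+    // then read the file back and verify every string byte-for-byte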
+    List<String> list = new ArrayList<String>();
+    int maxLength = atLeast(500);
+    ByteBlockPool pool = new ByteBlockPool(new ByteBlockPool.DirectAllocator());
+    pool.nextBuffer();
+    final int numValues = atLeast(100);
+    BytesRef ref = new BytesRef();
+    for (int i = 0; i < numValues; i++) {
+      final String value = _TestUtil.randomRealisticUnicodeString(random,
+          maxLength);
+      list.add(value);
+      ref.copy(value);
+      pool.copy(ref);
+    }
+    RAMDirectory dir = new RAMDirectory();
+    IndexOutput stream = dir.createOutput("foo.txt");
+    pool.writePool(stream);
+    stream.flush();
+    stream.close();
+    IndexInput input = dir.openInput("foo.txt");
+    assertEquals(pool.byteOffset + pool.byteUpto, stream.length());
+    BytesRef expected = new BytesRef();
+    BytesRef actual = new BytesRef();
+    for (String string : list) {
+      expected.copy(string);
+      actual.grow(expected.length);
+      actual.length = expected.length;
+      input.readBytes(actual.bytes, 0, actual.length);
+      assertEquals(expected, actual);
+    }
+    try {
+      input.readByte();
+      fail("must be EOF");
+    } catch (IOException e) {
+      // expected - read past EOF
+    }
+    dir.close();
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/util/TestBytesRefHash.java b/lucene/backwards/src/test/org/apache/lucene/util/TestBytesRefHash.java
new file mode 100644
index 0000000..b677ff9
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/util/TestBytesRefHash.java
@@ -0,0 +1,347 @@
+package org.apache.lucene.util;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.BitSet;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.SortedSet;
+import java.util.TreeSet;
+import java.util.Map.Entry;
+
+import org.apache.lucene.util.BytesRefHash.MaxBytesLengthExceededException;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ *
+ */
+public class TestBytesRefHash extends LuceneTestCase {
+
+  BytesRefHash hash;
+  ByteBlockPool pool;
+  
+  /**
+   */
+  @Override
+  @Before
+  public void setUp() throws Exception {
+    super.setUp();
+    pool = newPool();
+    hash = newHash(pool);
+  }
+  
+  private ByteBlockPool newPool() {
+    return random.nextBoolean() && pool != null ? pool
+        : new ByteBlockPool(new RecyclingByteBlockAllocator(ByteBlockPool.BYTE_BLOCK_SIZE, random.nextInt(25)));
+  }
+  
+  private BytesRefHash newHash(ByteBlockPool blockPool) {
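+    // randomly exercise both constructors: the default one and the one taking an explicit
+    // initial size plus a DirectBytesStartArray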
+    final int initSize = 2 << 1 + random.nextInt(5);
+    return random.nextBoolean() ? new BytesRefHash(blockPool) : new BytesRefHash(
+        blockPool, initSize, new BytesRefHash.DirectBytesStartArray(initSize));
+  }
+
+  /**
+   * Test method for {@link org.apache.lucene.util.BytesRefHash#size()}.
+   */
+  @Test
+  public void testSize() {
+    BytesRef ref = new BytesRef();
+    int num = atLeast(2);
+    for (int j = 0; j < num; j++) {
+      final int mod = 1+random.nextInt(39);
+      for (int i = 0; i < 797; i++) {
+        String str;
+        do {
+          str = _TestUtil.randomRealisticUnicodeString(random, 1000);
+        } while (str.length() == 0);
+        ref.copy(str);
+        int count = hash.size();
+        int key = hash.add(ref);
+        if (key < 0)
+          assertEquals(hash.size(), count);
+        else
+          assertEquals(hash.size(), count + 1);
+        if(i % mod == 0) {
+          hash.clear();
+          assertEquals(0, hash.size());
+          hash.reinit();
+        }
+      }
+    }
+  }
+
+  /**
+   * Test method for
+   * {@link org.apache.lucene.util.BytesRefHash#get(org.apache.lucene.util.BytesRefHash.Entry)}
+   * .
+   */
+  @Test
+  public void testGet() {
+    BytesRef ref = new BytesRef();
+    BytesRef scratch = new BytesRef();
+    int num = atLeast(2);
+    for (int j = 0; j < num; j++) {
+      Map<String, Integer> strings = new HashMap<String, Integer>();
+      int uniqueCount = 0;
+      for (int i = 0; i < 797; i++) {
+        String str;
+        do {
+          str = _TestUtil.randomRealisticUnicodeString(random, 1000);
+        } while (str.length() == 0);
+        ref.copy(str);
+        int count = hash.size();
+        int key = hash.add(ref);
+        if (key >= 0) {
+          assertNull(strings.put(str, Integer.valueOf(key)));
+          assertEquals(uniqueCount, key);
+          uniqueCount++;
+          assertEquals(hash.size(), count + 1);
+        } else {
+          assertTrue((-key)-1 < count);
+          assertEquals(hash.size(), count);
+        }
+      }
+      for (Entry<String, Integer> entry : strings.entrySet()) {
+        ref.copy(entry.getKey());
+        assertEquals(ref, hash.get(entry.getValue().intValue(), scratch));
+      }
+      hash.clear();
+      assertEquals(0, hash.size());
+      hash.reinit();
+    }
+  }
+
+  /**
+   * Test method for {@link org.apache.lucene.util.BytesRefHash#compact()}.
+   */
+  @Test
+  public void testCompact() {
+    BytesRef ref = new BytesRef();
+    int num = atLeast(2);
+    for (int j = 0; j < num; j++) {
+      int numEntries = 0;
+      final int size = 797;
+      BitSet bits = new BitSet(size);
+      for (int i = 0; i < size; i++) {
+        String str;
+        do {
+          str = _TestUtil.randomRealisticUnicodeString(random, 1000);
+        } while (str.length() == 0);
+        ref.copy(str);
+        final int key = hash.add(ref);
+        if (key < 0) {
+          assertTrue(bits.get((-key)-1));
+        } else {
+          assertFalse(bits.get(key));
+          bits.set(key);
+          numEntries++;
+        }
+      }
+      assertEquals(hash.size(), bits.cardinality());
+      assertEquals(numEntries, bits.cardinality());
+      assertEquals(numEntries, hash.size());
+      int[] compact = hash.compact();
+      assertTrue(numEntries < compact.length);
+      for (int i = 0; i < numEntries; i++) {
+        bits.set(compact[i], false);
+      }
+      assertEquals(0, bits.cardinality());
+      hash.clear();
+      assertEquals(0, hash.size());
+      hash.reinit();
+    }
+  }
+
+  /**
+   * Test method for
+   * {@link org.apache.lucene.util.BytesRefHash#sort(java.util.Comparator)}.
+   */
+  @Test
+  public void testSort() {
+    BytesRef ref = new BytesRef();
+    int num = atLeast(2);
+    for (int j = 0; j < num; j++) {
+      SortedSet<String> strings = new TreeSet<String>();
+      for (int i = 0; i < 797; i++) {
+        String str;
+        do {
+          str = _TestUtil.randomRealisticUnicodeString(random, 1000);
+        } while (str.length() == 0);
+        ref.copy(str);
+        hash.add(ref);
+        strings.add(str);
+      }
+      // We use the UTF-16 comparator here, because we need to be able to
+      // compare to native String.compareTo() [UTF-16]:
+      int[] sort = hash.sort(BytesRef.getUTF8SortedAsUTF16Comparator());
+      assertTrue(strings.size() < sort.length);
+      int i = 0;
+      BytesRef scratch = new BytesRef();
+      for (String string : strings) {
+        ref.copy(string);
+        assertEquals(ref, hash.get(sort[i++], scratch));
+      }
+      hash.clear();
+      assertEquals(0, hash.size());
+      hash.reinit();
+
+    }
+  }
+
+  /**
+   * Test method for
+   * {@link org.apache.lucene.util.BytesRefHash#add(org.apache.lucene.util.BytesRef)}
+   * .
+   */
+  @Test
+  public void testAdd() {
+    BytesRef ref = new BytesRef();
+    BytesRef scratch = new BytesRef();
+    int num = atLeast(2);
+    for (int j = 0; j < num; j++) {
+      Set<String> strings = new HashSet<String>();
+      int uniqueCount = 0;
+      for (int i = 0; i < 797; i++) {
+        String str;
+        do {
+          str = _TestUtil.randomRealisticUnicodeString(random, 1000);
+        } while (str.length() == 0);
+        ref.copy(str);
+        int count = hash.size();
+        int key = hash.add(ref);
+
+        if (key >=0) {
+          assertTrue(strings.add(str));
+          assertEquals(uniqueCount, key);
+          assertEquals(hash.size(), count + 1);
+          uniqueCount++;
+        } else {
+          assertFalse(strings.add(str));
+          assertTrue((-key)-1 < count);
+          assertEquals(str, hash.get((-key)-1, scratch).utf8ToString());
+          assertEquals(count, hash.size());
+        }
+      }
+      
+      assertAllIn(strings, hash);
+      hash.clear();
+      assertEquals(0, hash.size());
+      hash.reinit();
+    }
+  }
+
+  @Test(expected = MaxBytesLengthExceededException.class)
+  public void testLargeValue() {
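+    // the first two sizes are expected to fit; only the last exceeds the maximum value
+    // length, so the exception must come from the final add and nowhere else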
+    int[] sizes = new int[] { random.nextInt(5),
+        ByteBlockPool.BYTE_BLOCK_SIZE - 33 + random.nextInt(31),
+        ByteBlockPool.BYTE_BLOCK_SIZE - 1 + random.nextInt(37) };
+    BytesRef ref = new BytesRef();
+    for (int i = 0; i < sizes.length; i++) {
+      ref.bytes = new byte[sizes[i]];
+      ref.offset = 0;
+      ref.length = sizes[i];
+      try {
+        assertEquals(i, hash.add(ref));
+      } catch (MaxBytesLengthExceededException e) {
+        if (i < sizes.length - 1)
+          fail("unexpected exception at size: " + sizes[i]);
+        throw e;
+      }
+    }
+  }
+  
+  /**
+   * Test method for
+   * {@link org.apache.lucene.util.BytesRefHash#addByPoolOffset(int)}
+   * .
+   */
+  @Test
+  public void testAddByPoolOffset() {
+    BytesRef ref = new BytesRef();
+    BytesRef scratch = new BytesRef();
+    BytesRefHash offsetHash = newHash(pool);
+    int num = atLeast(2);
+    for (int j = 0; j < num; j++) {
+      Set<String> strings = new HashSet<String>();
+      int uniqueCount = 0;
+      for (int i = 0; i < 797; i++) {
+        String str;
+        do {
+          str = _TestUtil.randomRealisticUnicodeString(random, 1000);
+        } while (str.length() == 0);
+        ref.copy(str);
+        int count = hash.size();
+        int key = hash.add(ref);
+
+        if (key >= 0) {
+          assertTrue(strings.add(str));
+          assertEquals(uniqueCount, key);
+          assertEquals(hash.size(), count + 1);
+          int offsetKey = offsetHash.addByPoolOffset(hash.byteStart(key));
+          assertEquals(uniqueCount, offsetKey);
+          assertEquals(offsetHash.size(), count + 1);
+          uniqueCount++;
+        } else {
+          assertFalse(strings.add(str));
+          assertTrue((-key)-1 < count);
+          assertEquals(str, hash.get((-key)-1, scratch).utf8ToString());
+          assertEquals(count, hash.size());
+          int offsetKey = offsetHash.addByPoolOffset(hash.byteStart((-key)-1));
+          assertTrue((-offsetKey)-1 < count);
+          assertEquals(str, hash.get((-offsetKey)-1, scratch).utf8ToString());
+          assertEquals(count, hash.size());
+        }
+      }
+      
+      assertAllIn(strings, hash);
+      for (String string : strings) {
+        ref.copy(string);
+        int key = hash.add(ref);
+        BytesRef bytesRef = offsetHash.get((-key)-1, scratch);
+        assertEquals(ref, bytesRef);
+      }
+
+      hash.clear();
+      assertEquals(0, hash.size());
+      offsetHash.clear();
+      assertEquals(0, offsetHash.size());
+      hash.reinit(); // init for the next round
+      offsetHash.reinit();
+    }
+  }
+  
+  private void assertAllIn(Set<String> strings, BytesRefHash hash) {
+    BytesRef ref = new BytesRef();
+    BytesRef scratch = new BytesRef();
+    int count = hash.size();
+    for (String string : strings) {
+      ref.copy(string);
+      int key = hash.add(ref); // add again to check duplicates
+      assertEquals(string, hash.get((-key)-1, scratch).utf8ToString());
+      assertEquals(count, hash.size());
+      assertTrue("key: " + key + " count: " + count + " string: " + string,
+          key < count);
+    }
+  }
+
+
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/util/TestCharacterUtils.java b/lucene/backwards/src/test/org/apache/lucene/util/TestCharacterUtils.java
new file mode 100644
index 0000000..69393bc
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/util/TestCharacterUtils.java
@@ -0,0 +1,192 @@
+package org.apache.lucene.util;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.io.Reader;
+import java.io.StringReader;
+
+import org.apache.lucene.util.CharacterUtils.CharacterBuffer;
+import org.junit.Test;
+
+/**
+ * TestCase for the {@link CharacterUtils} class.
+ */
+public class TestCharacterUtils extends LuceneTestCase {
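+  // Version.LUCENE_30 selects the legacy ("java4") implementation that treats each
+  // surrogate char individually, while TEST_VERSION_CURRENT handles full supplementary
+  // code points; the tests below exercise both side by side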
+
+  @Test
+  public void testCodePointAtCharArrayInt() {
+    CharacterUtils java4 = CharacterUtils.getInstance(Version.LUCENE_30);
+    char[] cpAt3 = "Abc\ud801\udc1c".toCharArray();
+    char[] highSurrogateAt3 = "Abc\ud801".toCharArray();
+    assertEquals((int) 'A', java4.codePointAt(cpAt3, 0));
+    assertEquals((int) '\ud801', java4.codePointAt(cpAt3, 3));
+    assertEquals((int) '\ud801', java4.codePointAt(highSurrogateAt3, 3));
+    try {
+      java4.codePointAt(highSurrogateAt3, 4);
+      fail("array index out of bounds");
+    } catch (IndexOutOfBoundsException e) {
+    }
+
+    CharacterUtils java5 = CharacterUtils.getInstance(TEST_VERSION_CURRENT);
+    assertEquals((int) 'A', java5.codePointAt(cpAt3, 0));
+    assertEquals(Character.toCodePoint('\ud801', '\udc1c'), java5.codePointAt(
+        cpAt3, 3));
+    assertEquals((int) '\ud801', java5.codePointAt(highSurrogateAt3, 3));
+    try {
+      java5.codePointAt(highSurrogateAt3, 4);
+      fail("array index out of bounds");
+    } catch (IndexOutOfBoundsException e) {
+    }
+  }
+
+  @Test
+  public void testCodePointAtCharSequenceInt() {
+    CharacterUtils java4 = CharacterUtils.getInstance(Version.LUCENE_30);
+    String cpAt3 = "Abc\ud801\udc1c";
+    String highSurrogateAt3 = "Abc\ud801";
+    assertEquals((int) 'A', java4.codePointAt(cpAt3, 0));
+    assertEquals((int) '\ud801', java4.codePointAt(cpAt3, 3));
+    assertEquals((int) '\ud801', java4.codePointAt(highSurrogateAt3, 3));
+    try {
+      java4.codePointAt(highSurrogateAt3, 4);
+      fail("string index out of bounds");
+    } catch (IndexOutOfBoundsException e) {
+    }
+
+    CharacterUtils java5 = CharacterUtils.getInstance(TEST_VERSION_CURRENT);
+    assertEquals((int) 'A', java5.codePointAt(cpAt3, 0));
+    assertEquals(Character.toCodePoint('\ud801', '\udc1c'), java5.codePointAt(
+        cpAt3, 3));
+    assertEquals((int) '\ud801', java5.codePointAt(highSurrogateAt3, 3));
+    try {
+      java5.codePointAt(highSurrogateAt3, 4);
+      fail("string index out of bounds");
+    } catch (IndexOutOfBoundsException e) {
+    }
+
+  }
+
+  @Test
+  public void testCodePointAtCharArrayIntInt() {
+    CharacterUtils java4 = CharacterUtils.getInstance(Version.LUCENE_30);
+    char[] cpAt3 = "Abc\ud801\udc1c".toCharArray();
+    char[] highSurrogateAt3 = "Abc\ud801".toCharArray();
+    assertEquals((int) 'A', java4.codePointAt(cpAt3, 0, 2));
+    assertEquals((int) '\ud801', java4.codePointAt(cpAt3, 3, 5));
+    assertEquals((int) '\ud801', java4.codePointAt(highSurrogateAt3, 3, 4));
+
+    CharacterUtils java5 = CharacterUtils.getInstance(TEST_VERSION_CURRENT);
+    assertEquals((int) 'A', java5.codePointAt(cpAt3, 0, 2));
+    assertEquals(Character.toCodePoint('\ud801', '\udc1c'), java5.codePointAt(
+        cpAt3, 3, 5));
+    assertEquals((int) '\ud801', java5.codePointAt(highSurrogateAt3, 3, 4));
+
+  }
+
+  @Test
+  public void testNewCharacterBuffer() {
+    CharacterBuffer newCharacterBuffer = CharacterUtils.newCharacterBuffer(1024);
+    assertEquals(1024, newCharacterBuffer.getBuffer().length);
+    assertEquals(0, newCharacterBuffer.getOffset());
+    assertEquals(0, newCharacterBuffer.getLength());
+
+    newCharacterBuffer = CharacterUtils.newCharacterBuffer(2);
+    assertEquals(2, newCharacterBuffer.getBuffer().length);
+    assertEquals(0, newCharacterBuffer.getOffset());
+    assertEquals(0, newCharacterBuffer.getLength());
+
+    try {
+      newCharacterBuffer = CharacterUtils.newCharacterBuffer(1);
+      fail("length must be >= 2");
+    } catch (IllegalArgumentException e) {
+    }
+  }
+
+  @Test
+  public void testFillNoHighSurrogate() throws IOException {
+    Version[] versions = new Version[] { Version.LUCENE_30, TEST_VERSION_CURRENT };
+    for (Version version : versions) {
+      CharacterUtils instance = CharacterUtils.getInstance(version);
+      Reader reader = new StringReader("helloworld");
+      CharacterBuffer buffer = CharacterUtils.newCharacterBuffer(6);
+      assertTrue(instance.fill(buffer,reader));
+      assertEquals(0, buffer.getOffset());
+      assertEquals(6, buffer.getLength());
+      assertEquals("hellow", new String(buffer.getBuffer()));
+      assertTrue(instance.fill(buffer,reader));
+      assertEquals(4, buffer.getLength());
+      assertEquals(0, buffer.getOffset());
+
+      assertEquals("orld", new String(buffer.getBuffer(), buffer.getOffset(),
+          buffer.getLength()));
+      assertFalse(instance.fill(buffer,reader));
+    }
+  }
+
+  @Test
+  public void testFillJava15() throws IOException {
+    String input = "1234\ud801\udc1c789123\ud801\ud801\udc1c\ud801";
+    CharacterUtils instance = CharacterUtils.getInstance(TEST_VERSION_CURRENT);
+    Reader reader = new StringReader(input);
+    CharacterBuffer buffer = CharacterUtils.newCharacterBuffer(5);
+    assertTrue(instance.fill(buffer, reader));
+    assertEquals(4, buffer.getLength());
+    assertEquals("1234", new String(buffer.getBuffer(), buffer.getOffset(),
+        buffer.getLength()));
+    assertTrue(instance.fill(buffer, reader));
+    assertEquals(5, buffer.getLength());
+    assertEquals("\ud801\udc1c789", new String(buffer.getBuffer()));
+    assertTrue(instance.fill(buffer, reader));
+    assertEquals(4, buffer.getLength());
+    assertEquals("123\ud801", new String(buffer.getBuffer(),
+        buffer.getOffset(), buffer.getLength()));
+    assertTrue(instance.fill(buffer, reader));
+    assertEquals(2, buffer.getLength());
+    assertEquals("\ud801\udc1c", new String(buffer.getBuffer(), buffer
+        .getOffset(), buffer.getLength()));
+    assertTrue(instance.fill(buffer, reader));
+    assertEquals(1, buffer.getLength());
+    assertEquals("\ud801", new String(buffer.getBuffer(), buffer
+        .getOffset(), buffer.getLength()));
+    assertFalse(instance.fill(buffer, reader));
+  }
+
+  @Test
+  public void testFillJava14() throws IOException {
+    String input = "1234\ud801\udc1c789123\ud801\ud801\udc1c\ud801";
+    CharacterUtils instance = CharacterUtils.getInstance(Version.LUCENE_30);
+    Reader reader = new StringReader(input);
+    CharacterBuffer buffer = CharacterUtils.newCharacterBuffer(5);
+    assertTrue(instance.fill(buffer, reader));
+    assertEquals(5, buffer.getLength());
+    assertEquals("1234\ud801", new String(buffer.getBuffer(), buffer
+        .getOffset(), buffer.getLength()));
+    assertTrue(instance.fill(buffer, reader));
+    assertEquals(5, buffer.getLength());
+    assertEquals("\udc1c7891", new String(buffer.getBuffer()));
+    buffer = CharacterUtils.newCharacterBuffer(6);
+    assertTrue(instance.fill(buffer, reader));
+    assertEquals(6, buffer.getLength());
+    assertEquals("23\ud801\ud801\udc1c\ud801", new String(buffer.getBuffer(), buffer
+        .getOffset(), buffer.getLength()));
+    assertFalse(instance.fill(buffer, reader));
+
+  }
+
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/util/TestCharsRef.java b/lucene/backwards/src/test/org/apache/lucene/util/TestCharsRef.java
new file mode 100644
index 0000000..1852028
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/util/TestCharsRef.java
@@ -0,0 +1,41 @@
+package org.apache.lucene.util;
+
+import java.util.Arrays;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class TestCharsRef extends LuceneTestCase {
+  public void testUTF16InUTF8Order() {
+    final int numStrings = atLeast(1000);
+    BytesRef utf8[] = new BytesRef[numStrings];
+    CharsRef utf16[] = new CharsRef[numStrings];
+    
+    for (int i = 0; i < numStrings; i++) {
+      String s = _TestUtil.randomUnicodeString(random);
+      utf8[i] = new BytesRef(s);
+      utf16[i] = new CharsRef(s);
+    }
+    
+    Arrays.sort(utf8);
+    Arrays.sort(utf16, CharsRef.getUTF16SortedAsUTF8Comparator());
+    
+    for (int i = 0; i < numStrings; i++) {
+      assertEquals(utf8[i].utf8ToString(), utf16[i].toString());
+    }
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/util/TestCloseableThreadLocal.java b/lucene/backwards/src/test/org/apache/lucene/util/TestCloseableThreadLocal.java
new file mode 100644
index 0000000..9b70810
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/util/TestCloseableThreadLocal.java
@@ -0,0 +1,50 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.lucene.util;
+
+public class TestCloseableThreadLocal extends LuceneTestCase {
+  public static final String TEST_VALUE = "initvaluetest";
+  
+  public void testInitValue() {
+    InitValueThreadLocal tl = new InitValueThreadLocal();
+    String str = (String)tl.get();
+    assertEquals(TEST_VALUE, str);
+  }
+
+  public void testNullValue() throws Exception {
+    // Tests that null can be set as a valid value (LUCENE-1805). This
+    // previously failed in get().
+    CloseableThreadLocal<Object> ctl = new CloseableThreadLocal<Object>();
+    ctl.set(null);
+    assertNull(ctl.get());
+  }
+
+  public void testDefaultValueWithoutSetting() throws Exception {
+    // LUCENE-1805: make sure default get returns null,
+    // twice in a row
+    CloseableThreadLocal<Object> ctl = new CloseableThreadLocal<Object>();
+    assertNull(ctl.get());
+  }
+
+  public class InitValueThreadLocal extends CloseableThreadLocal<Object> {
+    @Override
+    protected Object initialValue() {
+      return TEST_VALUE;
+    } 
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/util/TestCollectionUtil.java b/lucene/backwards/src/test/org/apache/lucene/util/TestCollectionUtil.java
new file mode 100644
index 0000000..8392c9a
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/util/TestCollectionUtil.java
@@ -0,0 +1,125 @@
+package org.apache.lucene.util;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Arrays;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.LinkedList;
+import java.util.List;
+
+public class TestCollectionUtil extends LuceneTestCase {
+
+  private List<Integer> createRandomList(int maxSize) {
+    final Integer[] a = new Integer[random.nextInt(maxSize) + 1];
+    for (int i = 0; i < a.length; i++) {
+      a[i] = Integer.valueOf(random.nextInt(a.length));
+    }
+    return Arrays.asList(a);
+  }
+  
+  public void testQuickSort() {
+    for (int i = 0, c = atLeast(500); i < c; i++) {
+      List<Integer> list1 = createRandomList(1000), list2 = new ArrayList<Integer>(list1);
+      CollectionUtil.quickSort(list1);
+      Collections.sort(list2);
+      assertEquals(list2, list1);
+      
+      list1 = createRandomList(1000);
+      list2 = new ArrayList<Integer>(list1);
+      CollectionUtil.quickSort(list1, Collections.reverseOrder());
+      Collections.sort(list2, Collections.reverseOrder());
+      assertEquals(list2, list1);
+      // reverse back, so we can test that completely backwards sorted array (worst case) is working:
+      CollectionUtil.quickSort(list1);
+      Collections.sort(list2);
+      assertEquals(list2, list1);
+    }
+  }
+  
+  public void testMergeSort() {
+    for (int i = 0, c = atLeast(500); i < c; i++) {
+      List<Integer> list1 = createRandomList(1000), list2 = new ArrayList<Integer>(list1);
+      CollectionUtil.mergeSort(list1);
+      Collections.sort(list2);
+      assertEquals(list2, list1);
+      
+      list1 = createRandomList(1000);
+      list2 = new ArrayList<Integer>(list1);
+      CollectionUtil.mergeSort(list1, Collections.reverseOrder());
+      Collections.sort(list2, Collections.reverseOrder());
+      assertEquals(list2, list1);
+      // reverse back, so we can test that completely backwards sorted array (worst case) is working:
+      CollectionUtil.mergeSort(list1);
+      Collections.sort(list2);
+      assertEquals(list2, list1);
+    }
+  }
+  
+  public void testInsertionSort() {
+    for (int i = 0, c = atLeast(500); i < c; i++) {
+      List<Integer> list1 = createRandomList(30), list2 = new ArrayList<Integer>(list1);
+      CollectionUtil.insertionSort(list1);
+      Collections.sort(list2);
+      assertEquals(list2, list1);
+      
+      list1 = createRandomList(30);
+      list2 = new ArrayList<Integer>(list1);
+      CollectionUtil.insertionSort(list1, Collections.reverseOrder());
+      Collections.sort(list2, Collections.reverseOrder());
+      assertEquals(list2, list1);
+      // reverse back, so we can test that completely backwards sorted array (worst case) is working:
+      CollectionUtil.insertionSort(list1);
+      Collections.sort(list2);
+      assertEquals(list2, list1);
+    }
+  }
+  
+  public void testEmptyListSort() {
+    // should produce no exceptions
+    List<Integer> list = Arrays.asList(new Integer[0]);
+    CollectionUtil.quickSort(list);
+    CollectionUtil.mergeSort(list);
+    CollectionUtil.insertionSort(list);
+    CollectionUtil.quickSort(list, Collections.reverseOrder());
+    CollectionUtil.mergeSort(list, Collections.reverseOrder());
+    CollectionUtil.insertionSort(list, Collections.reverseOrder());
+    
+    // check that empty non-random access lists pass sorting without ex (as sorting is not needed)
+    list = new LinkedList<Integer>();
+    CollectionUtil.quickSort(list);
+    CollectionUtil.mergeSort(list);
+    CollectionUtil.insertionSort(list);
+    CollectionUtil.quickSort(list, Collections.reverseOrder());
+    CollectionUtil.mergeSort(list, Collections.reverseOrder());
+    CollectionUtil.insertionSort(list, Collections.reverseOrder());
+  }
+  
+  public void testOneElementListSort() {
+    // check that one-element non-random access lists pass sorting without ex (as sorting is not needed)
+    List<Integer> list = new LinkedList<Integer>();
+    list.add(1);
+    CollectionUtil.quickSort(list);
+    CollectionUtil.mergeSort(list);
+    CollectionUtil.insertionSort(list);
+    CollectionUtil.quickSort(list, Collections.reverseOrder());
+    CollectionUtil.mergeSort(list, Collections.reverseOrder());
+    CollectionUtil.insertionSort(list, Collections.reverseOrder());
+  }
+  
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/util/TestDoubleBarrelLRUCache.java b/lucene/backwards/src/test/org/apache/lucene/util/TestDoubleBarrelLRUCache.java
new file mode 100644
index 0000000..952c218
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/util/TestDoubleBarrelLRUCache.java
@@ -0,0 +1,188 @@
+package org.apache.lucene.util;
+
+/**
+* Licensed to the Apache Software Foundation (ASF) under one or more
+* contributor license agreements.  See the NOTICE file distributed with
+* this work for additional information regarding copyright ownership.
+* The ASF licenses this file to You under the Apache License, Version 2.0
+* (the "License"); you may not use this file except in compliance with
+* the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+import org.apache.lucene.util.LuceneTestCase;
+
+public class TestDoubleBarrelLRUCache extends LuceneTestCase {
+
+  private void testCache(DoubleBarrelLRUCache<CloneableInteger,Object> cache, int n) throws Exception {
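+    // fill the cache beyond its capacity in stages; entries that keep being accessed
+    // must survive eviction while untouched ones are allowed to be thrown away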
+    Object dummy = new Object();
+    
+    for (int i = 0; i < n; i++) {
+      cache.put(new CloneableInteger(i), dummy);
+    }
+    
+    // access every 2nd item in cache
+    for (int i = 0; i < n; i+=2) {
+      assertNotNull(cache.get(new CloneableInteger(i)));
+    }
+    
+    // add n/2 elements to cache, the ones that weren't
+    // touched in the previous loop should now be thrown away
+    for (int i = n; i < n + (n / 2); i++) {
+      cache.put(new CloneableInteger(i), dummy);
+    }
+    
+    // access every 4th item in cache
+    for (int i = 0; i < n; i+=4) {
+      assertNotNull(cache.get(new CloneableInteger(i)));
+    }
+
+    // add 3/4n elements to cache, the ones that weren't
+    // touched in the previous loops should now be thrown away
+    for (int i = n; i < n + (n * 3 / 4); i++) {
+      cache.put(new CloneableInteger(i), dummy);
+    }
+    
+    // access every 4th item in cache
+    for (int i = 0; i < n; i+=4) {
+      assertNotNull(cache.get(new CloneableInteger(i)));
+    }
+  }
+    
+  public void testLRUCache() throws Exception {
+    final int n = 100;
+    testCache(new DoubleBarrelLRUCache<CloneableInteger,Object>(n), n);
+  }
+
+  private class CacheThread extends Thread {
+    private final CloneableObject[] objs;
+    private final DoubleBarrelLRUCache<CloneableObject,Object> c;
+    private final long endTime;
+    volatile boolean failed;
+
+    public CacheThread(DoubleBarrelLRUCache<CloneableObject,Object> c,
+                       CloneableObject[] objs, long endTime) {
+      this.c = c;
+      this.objs = objs;
+      this.endTime = endTime;
+    }
+
+    @Override
+    public void run() {
+      try {
+        long count = 0;
+        long miss = 0;
+        long hit = 0;
+        final int limit = objs.length;
+
+        while(true) {
+          final CloneableObject obj = objs[(int) ((count/2) % limit)];
+          Object v = c.get(obj);
+          if (v == null) {
+            c.put(new CloneableObject(obj), obj);
+            miss++;
+          } else {
+            assert obj == v;
+            hit++;
+          }
+          if ((++count % 10000) == 0) {
+            if (System.currentTimeMillis() >= endTime)  {
+              break;
+            }
+          }
+        }
+
+        addResults(miss, hit);
+      } catch (Throwable t) {
+        failed = true;
+        throw new RuntimeException(t);
+      }
+    }
+  }
+
+  long totMiss, totHit;
+  void addResults(long miss, long hit) {
+    totMiss += miss;
+    totHit += hit;
+  }
+
+  public void testThreadCorrectness() throws Exception {
+    final int NUM_THREADS = 4;
+    final int CACHE_SIZE = 512;
+    final int OBJ_COUNT = 3*CACHE_SIZE;
+
+    DoubleBarrelLRUCache<CloneableObject,Object> c = new DoubleBarrelLRUCache<CloneableObject,Object>(1024);
+
+    CloneableObject[] objs = new CloneableObject[OBJ_COUNT];
+    for(int i=0;i<OBJ_COUNT;i++) {
+      objs[i] = new CloneableObject(new Object());
+    }
+    
+    final CacheThread[] threads = new CacheThread[NUM_THREADS];
+    final long endTime = System.currentTimeMillis()+1000L;
+    for(int i=0;i<NUM_THREADS;i++) {
+      threads[i] = new CacheThread(c, objs, endTime);
+      threads[i].start();
+    }
+    for(int i=0;i<NUM_THREADS;i++) {
+      threads[i].join();
+      assert !threads[i].failed;
+    }
+    //System.out.println("hits=" + totHit + " misses=" + totMiss);
+  }
+  
+  private static class CloneableObject extends DoubleBarrelLRUCache.CloneableKey {
+    private Object value;
+
+    public CloneableObject(Object value) {
+      this.value = value;
+    }
+
+    @Override
+    public boolean equals(Object other) {
+      return this.value.equals(((CloneableObject) other).value);
+    }
+
+    @Override
+    public int hashCode() {
+      return value.hashCode();
+    }
+
+    @Override
+    public Object clone() {
+      return new CloneableObject(value);
+    }
+  }
+
+  protected static class CloneableInteger extends DoubleBarrelLRUCache.CloneableKey {
+    private Integer value;
+
+    public CloneableInteger(Integer value) {
+      this.value = value;
+    }
+
+    @Override
+    public boolean equals(Object other) {
+      return this.value.equals(((CloneableInteger) other).value);
+    }
+
+    @Override
+    public int hashCode() {
+      return value.hashCode();
+    }
+
+    @Override
+    public Object clone() {
+      return new CloneableInteger(value);
+    }
+  }
+
+
+}
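
The test above drives DoubleBarrelLRUCache through its put/get contract with keys that extend DoubleBarrelLRUCache.CloneableKey. For readers unfamiliar with the class, a minimal standalone sketch of that same usage follows; it assumes only the API the test itself exercises, and the IntKey helper is a hypothetical stand-in for the CloneableInteger key above.

import org.apache.lucene.util.DoubleBarrelLRUCache;

public class LRUCacheSketch {
  // Hypothetical key type mirroring CloneableInteger in the test:
  // keys must extend CloneableKey and provide equals/hashCode/clone.
  static final class IntKey extends DoubleBarrelLRUCache.CloneableKey {
    private final Integer value;
    IntKey(Integer value) { this.value = value; }
    @Override public boolean equals(Object other) {
      return other instanceof IntKey && value.equals(((IntKey) other).value);
    }
    @Override public int hashCode() { return value.hashCode(); }
    @Override public Object clone() { return new IntKey(value); }
  }

  public static void main(String[] args) {
    // Capacity of 8 entries; entries not touched recently are evicted
    // as further puts push the cache past its size limit.
    DoubleBarrelLRUCache<IntKey, String> cache =
        new DoubleBarrelLRUCache<IntKey, String>(8);
    cache.put(new IntKey(1), "one");
    System.out.println(cache.get(new IntKey(1)));  // "one" while still resident
    System.out.println(cache.get(new IntKey(2)));  // null: never inserted
  }
}
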
diff --git a/lucene/backwards/src/test/org/apache/lucene/util/TestFieldCacheSanityChecker.java b/lucene/backwards/src/test/org/apache/lucene/util/TestFieldCacheSanityChecker.java
new file mode 100644
index 0000000..befa48f
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/util/TestFieldCacheSanityChecker.java
@@ -0,0 +1,166 @@
+package org.apache.lucene.util;
+
+/**
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.search.FieldCache;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.MultiReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.FieldCacheSanityChecker.Insanity;
+import org.apache.lucene.util.FieldCacheSanityChecker.InsanityType;
+
+import java.io.IOException;
+
+public class TestFieldCacheSanityChecker extends LuceneTestCase {
+
+  protected IndexReader readerA;
+  protected IndexReader readerB;
+  protected IndexReader readerX;
+  protected Directory dirA, dirB;
+  private static final int NUM_DOCS = 1000;
+
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    dirA = newDirectory();
+    dirB = newDirectory();
+
+    IndexWriter wA = new IndexWriter(dirA, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    IndexWriter wB = new IndexWriter(dirB, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+
+    long theLong = Long.MAX_VALUE;
+    double theDouble = Double.MAX_VALUE;
+    byte theByte = Byte.MAX_VALUE;
+    short theShort = Short.MAX_VALUE;
+    int theInt = Integer.MAX_VALUE;
+    float theFloat = Float.MAX_VALUE;
+    for (int i = 0; i < NUM_DOCS; i++){
+      Document doc = new Document();
+      doc.add(newField("theLong", String.valueOf(theLong--), Field.Store.NO, Field.Index.NOT_ANALYZED));
+      doc.add(newField("theDouble", String.valueOf(theDouble--), Field.Store.NO, Field.Index.NOT_ANALYZED));
+      doc.add(newField("theByte", String.valueOf(theByte--), Field.Store.NO, Field.Index.NOT_ANALYZED));
+      doc.add(newField("theShort", String.valueOf(theShort--), Field.Store.NO, Field.Index.NOT_ANALYZED));
+      doc.add(newField("theInt", String.valueOf(theInt--), Field.Store.NO, Field.Index.NOT_ANALYZED));
+      doc.add(newField("theFloat", String.valueOf(theFloat--), Field.Store.NO, Field.Index.NOT_ANALYZED));
+      if (0 == i % 3) {
+        wA.addDocument(doc);
+      } else {
+        wB.addDocument(doc);
+      }
+    }
+    wA.close();
+    wB.close();
+    readerA = IndexReader.open(dirA, true);
+    readerB = IndexReader.open(dirB, true);
+    readerX = new MultiReader(new IndexReader[] { readerA, readerB });
+  }
+
+  @Override
+  public void tearDown() throws Exception {
+    readerA.close();
+    readerB.close();
+    readerX.close();
+    dirA.close();
+    dirB.close();
+    super.tearDown();
+  }
+
+  public void testSanity() throws IOException {
+    FieldCache cache = FieldCache.DEFAULT;
+    cache.purgeAllCaches();
+
+    cache.getDoubles(readerA, "theDouble");
+    cache.getDoubles(readerA, "theDouble", FieldCache.DEFAULT_DOUBLE_PARSER);
+    cache.getDoubles(readerB, "theDouble", FieldCache.DEFAULT_DOUBLE_PARSER);
+
+    cache.getInts(readerX, "theInt");
+    cache.getInts(readerX, "theInt", FieldCache.DEFAULT_INT_PARSER);
+
+    // // // 
+
+    Insanity[] insanity = 
+      FieldCacheSanityChecker.checkSanity(cache.getCacheEntries());
+    
+    if (0 < insanity.length)
+      dumpArray(getTestLabel() + " INSANITY", insanity, System.err);
+
+    assertEquals("shouldn't be any cache insanity", 0, insanity.length);
+    cache.purgeAllCaches();
+  }
+
+  public void testInsanity1() throws IOException {
+    FieldCache cache = FieldCache.DEFAULT;
+    cache.purgeAllCaches();
+
+    cache.getInts(readerX, "theInt", FieldCache.DEFAULT_INT_PARSER);
+    cache.getStrings(readerX, "theInt");
+    cache.getBytes(readerX, "theByte");
+
+    // // // 
+
+    Insanity[] insanity = 
+      FieldCacheSanityChecker.checkSanity(cache.getCacheEntries());
+
+    assertEquals("wrong number of cache errors", 1, insanity.length);
+    assertEquals("wrong type of cache error", 
+                 InsanityType.VALUEMISMATCH,
+                 insanity[0].getType());
+    assertEquals("wrong number of entries in cache error", 2,
+                 insanity[0].getCacheEntries().length);
+
+    // we expect bad things, don't let tearDown complain about them
+    cache.purgeAllCaches();
+  }
+
+  public void testInsanity2() throws IOException {
+    FieldCache cache = FieldCache.DEFAULT;
+    cache.purgeAllCaches();
+
+    cache.getStrings(readerA, "theString");
+    cache.getStrings(readerB, "theString");
+    cache.getStrings(readerX, "theString");
+
+    cache.getBytes(readerX, "theByte");
+
+
+    // // // 
+
+    Insanity[] insanity = 
+      FieldCacheSanityChecker.checkSanity(cache.getCacheEntries());
+    
+    assertEquals("wrong number of cache errors", 1, insanity.length);
+    assertEquals("wrong type of cache error", 
+                 InsanityType.SUBREADER,
+                 insanity[0].getType());
+    assertEquals("wrong number of entries in cache error", 3,
+                 insanity[0].getCacheEntries().length);
+
+    // we expect bad things, don't let tearDown complain about them
+    cache.purgeAllCaches();
+  }
+  
+  public void testInsanity3() throws IOException {
+
+    // :TODO: subreader tree walking is really hairy ... add more crazy tests.
+  }
+
+}
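
The sanity-check flow the tests above rely on boils down to populating FieldCache and handing its entries to FieldCacheSanityChecker. Below is a minimal sketch of that flow, assuming an already-open IndexReader and only the calls shown in the test (the report helper is hypothetical).

import java.io.IOException;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.FieldCache;
import org.apache.lucene.util.FieldCacheSanityChecker;
import org.apache.lucene.util.FieldCacheSanityChecker.Insanity;

public class SanityCheckSketch {
  // Populate the cache for one int field, then report any Insanity
  // (conflicting or duplicated cache entries) to stderr.
  static void report(IndexReader reader, String intField) throws IOException {
    FieldCache cache = FieldCache.DEFAULT;
    cache.purgeAllCaches();                 // start from a clean slate
    cache.getInts(reader, intField);        // first access populates the cache
    Insanity[] insanity =
        FieldCacheSanityChecker.checkSanity(cache.getCacheEntries());
    for (Insanity i : insanity) {
      System.err.println(i.getType() + ": " + i.getCacheEntries().length + " entries");
    }
    cache.purgeAllCaches();                 // don't leave entries behind
  }
}
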
diff --git a/lucene/backwards/src/test/org/apache/lucene/util/TestFixedBitSet.java b/lucene/backwards/src/test/org/apache/lucene/util/TestFixedBitSet.java
new file mode 100644
index 0000000..b4e575e
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/util/TestFixedBitSet.java
@@ -0,0 +1,289 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.lucene.util;
+
+import java.io.IOException;
+import java.util.BitSet;
+
+import org.apache.lucene.search.DocIdSetIterator;
+
+public class TestFixedBitSet extends LuceneTestCase {
+
+  void doGet(BitSet a, FixedBitSet b) {
+    int max = b.length();
+    for (int i=0; i<max; i++) {
+      if (a.get(i) != b.get(i)) {
+        fail("mismatch: BitSet=["+i+"]="+a.get(i));
+      }
+    }
+  }
+
+  void doNextSetBit(BitSet a, FixedBitSet b) {
+    int aa=-1,bb=-1;
+    do {
+      aa = a.nextSetBit(aa+1);
+      bb = bb < b.length()-1 ? b.nextSetBit(bb+1) : -1;
+      assertEquals(aa,bb);
+    } while (aa>=0);
+  }
+
+  void doPrevSetBit(BitSet a, FixedBitSet b) {
+    int aa = a.size() + random.nextInt(100);
+    int bb = aa;
+    do {
+      // aa = a.prevSetBit(aa-1);
+      aa--;
+      while ((aa >= 0) && (! a.get(aa))) {
+      	aa--;
+      }
+      if (b.length() == 0) {
+        bb = -1;
+      } else if (bb > b.length()-1) {
+        bb = b.prevSetBit(b.length()-1);
+      } else if (bb < 1) {
+        bb = -1;
+      } else {
+        bb = bb >= 1 ? b.prevSetBit(bb-1) : -1;
+      }
+      assertEquals(aa,bb);
+    } while (aa>=0);
+  }
+
+  // test interleaving different FixedBitSetIterator.next()/skipTo()
+  void doIterate(BitSet a, FixedBitSet b, int mode) throws IOException {
+    if (mode==1) doIterate1(a, b);
+    if (mode==2) doIterate2(a, b);
+  }
+
+  void doIterate1(BitSet a, FixedBitSet b) throws IOException {
+    int aa=-1,bb=-1;
+    DocIdSetIterator iterator = b.iterator();
+    do {
+      aa = a.nextSetBit(aa+1);
+      bb = (bb < b.length() && random.nextBoolean()) ? iterator.nextDoc() : iterator.advance(bb + 1);
+      assertEquals(aa == -1 ? DocIdSetIterator.NO_MORE_DOCS : aa, bb);
+    } while (aa>=0);
+  }
+
+  void doIterate2(BitSet a, FixedBitSet b) throws IOException {
+    int aa=-1,bb=-1;
+    DocIdSetIterator iterator = b.iterator();
+    do {
+      aa = a.nextSetBit(aa+1);
+      bb = random.nextBoolean() ? iterator.nextDoc() : iterator.advance(bb + 1);
+      assertEquals(aa == -1 ? DocIdSetIterator.NO_MORE_DOCS : aa, bb);
+    } while (aa>=0);
+  }
+
+  void doRandomSets(int maxSize, int iter, int mode) throws IOException {
+    BitSet a0=null;
+    FixedBitSet b0=null;
+
+    for (int i=0; i<iter; i++) {
+      int sz = _TestUtil.nextInt(random, 2, maxSize);
+      BitSet a = new BitSet(sz);
+      FixedBitSet b = new FixedBitSet(sz);
+
+      // test the various ways of setting bits
+      if (sz>0) {
+        int nOper = random.nextInt(sz);
+        for (int j=0; j<nOper; j++) {
+          int idx;         
+
+          idx = random.nextInt(sz);
+          a.set(idx);
+          b.set(idx);
+          
+          idx = random.nextInt(sz);
+          a.clear(idx);
+          b.clear(idx);
+          
+          idx = random.nextInt(sz);
+          a.flip(idx);
+          b.flip(idx, idx+1);
+
+          idx = random.nextInt(sz);
+          a.flip(idx);
+          b.flip(idx, idx+1);
+
+          boolean val2 = b.get(idx);
+          boolean val = b.getAndSet(idx);
+          assertTrue(val2 == val);
+          assertTrue(b.get(idx));
+          
+          if (!val) b.clear(idx);
+          assertTrue(b.get(idx) == val);
+        }
+      }
+
+      // test that the various ways of accessing the bits are equivalent
+      doGet(a,b);
+
+      // test ranges, including possible extension
+      int fromIndex, toIndex;
+      fromIndex = random.nextInt(sz/2);
+      toIndex = fromIndex + random.nextInt(sz - fromIndex);
+      BitSet aa = (BitSet)a.clone(); aa.flip(fromIndex,toIndex);
+      FixedBitSet bb = (FixedBitSet)b.clone(); bb.flip(fromIndex,toIndex);
+
+      doIterate(aa,bb, mode);   // a problem here is from flip or doIterate
+
+      fromIndex = random.nextInt(sz/2);
+      toIndex = fromIndex + random.nextInt(sz - fromIndex);
+      aa = (BitSet)a.clone(); aa.clear(fromIndex,toIndex);
+      bb = (FixedBitSet)b.clone(); bb.clear(fromIndex,toIndex);
+
+      doNextSetBit(aa,bb); // a problem here is from clear() or nextSetBit
+      
+      doPrevSetBit(aa,bb);
+
+      fromIndex = random.nextInt(sz/2);
+      toIndex = fromIndex + random.nextInt(sz - fromIndex);
+      aa = (BitSet)a.clone(); aa.set(fromIndex,toIndex);
+      bb = (FixedBitSet)b.clone(); bb.set(fromIndex,toIndex);
+
+      doNextSetBit(aa,bb); // a problem here is from set() or nextSetBit
+    
+      doPrevSetBit(aa,bb);
+
+      if (b0 != null && b0.length() <= b.length()) {
+        assertEquals(a.cardinality(), b.cardinality());
+
+        BitSet a_or = (BitSet) a.clone();
+        a_or.or(a0);
+
+        FixedBitSet b_or = (FixedBitSet) b.clone();
+        b_or.or(b0);
+
+        assertEquals(a0.cardinality(), b0.cardinality());
+        assertEquals(a_or.cardinality(), b_or.cardinality());
+
+        doIterate(a_or, b_or, mode);
+      }
+
+      a0=a;
+      b0=b;
+    }
+  }
+  
+  // large enough to flush out obvious bugs, small enough to run in <0.5 sec as part of a
+  // larger test suite.
+  public void testSmall() throws IOException {
+    doRandomSets(atLeast(1200), atLeast(1000), 1);
+    doRandomSets(atLeast(1200), atLeast(1000), 2);
+  }
+
+  // uncomment to run a bigger test (~2 minutes).
+  /*
+  public void testBig() {
+    doRandomSets(2000,200000, 1);
+    doRandomSets(2000,200000, 2);
+  }
+  */
+
+  public void testEquals() {
+    // This test can't handle numBits==0:
+    final int numBits = random.nextInt(2000) + 1;
+    FixedBitSet b1 = new FixedBitSet(numBits);
+    FixedBitSet b2 = new FixedBitSet(numBits);
+    assertTrue(b1.equals(b2));
+    assertTrue(b2.equals(b1));
+    for(int iter=0;iter<10*RANDOM_MULTIPLIER;iter++) {
+      int idx = random.nextInt(numBits);
+      if (!b1.get(idx)) {
+        b1.set(idx);
+        assertFalse(b1.equals(b2));
+        assertFalse(b2.equals(b1));
+        b2.set(idx);
+        assertTrue(b1.equals(b2));
+        assertTrue(b2.equals(b1));
+      }
+    }
+
+    // try different type of object
+    assertFalse(b1.equals(new Object()));
+  }
+  
+  public void testHashCodeEquals() {
+    // This test can't handle numBits==0:
+    final int numBits = random.nextInt(2000) + 1;
+    FixedBitSet b1 = new FixedBitSet(numBits);
+    FixedBitSet b2 = new FixedBitSet(numBits);
+    assertTrue(b1.equals(b2));
+    assertTrue(b2.equals(b1));
+    for(int iter=0;iter<10*RANDOM_MULTIPLIER;iter++) {
+      int idx = random.nextInt(numBits);
+      if (!b1.get(idx)) {
+        b1.set(idx);
+        assertFalse(b1.equals(b2));
+        assertFalse(b1.hashCode() == b2.hashCode());
+        b2.set(idx);
+        assertEquals(b1, b2);
+        assertEquals(b1.hashCode(), b2.hashCode());
+      }
+    }
+  } 
+
+  public void testSmallBitSets() {
+    // Make sure size 0-10 bit sets are OK:
+    for(int numBits=0;numBits<10;numBits++) {
+      FixedBitSet b1 = new FixedBitSet(numBits);
+      FixedBitSet b2 = new FixedBitSet(numBits);
+      assertTrue(b1.equals(b2));
+      assertEquals(b1.hashCode(), b2.hashCode());
+      assertEquals(0, b1.cardinality());
+      if (numBits > 0) {
+        b1.set(0, numBits);
+        assertEquals(numBits, b1.cardinality());
+        b1.flip(0, numBits);
+        assertEquals(0, b1.cardinality());
+      }
+    }
+  }
+  
+  private FixedBitSet makeFixedBitSet(int[] a, int numBits) {
+    FixedBitSet bs = new FixedBitSet(numBits);
+    for (int e: a) {
+      bs.set(e);
+    }
+    return bs;
+  }
+
+  private BitSet makeBitSet(int[] a) {
+    BitSet bs = new BitSet();
+    for (int e: a) {
+      bs.set(e);
+    }
+    return bs;
+  }
+
+  private void checkPrevSetBitArray(int [] a, int numBits) {
+    FixedBitSet obs = makeFixedBitSet(a, numBits);
+    BitSet bs = makeBitSet(a);
+    doPrevSetBit(bs, obs);
+  }
+
+  public void testPrevSetBit() {
+    checkPrevSetBitArray(new int[] {}, 0);
+    checkPrevSetBitArray(new int[] {0}, 1);
+    checkPrevSetBitArray(new int[] {0,2}, 3);
+  }
+}
+
+
+
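
The randomized test above keeps a java.util.BitSet and a FixedBitSet in lockstep and asserts that they agree. A small sketch of that mirroring, assuming only the FixedBitSet methods the test itself calls (set, flip with an exclusive upper bound, cardinality, nextSetBit, prevSetBit):

import java.util.BitSet;

import org.apache.lucene.util.FixedBitSet;

public class FixedBitSetSketch {
  public static void main(String[] args) {
    // FixedBitSet has a fixed length chosen at construction time, unlike
    // java.util.BitSet, which grows on demand.
    FixedBitSet fixed = new FixedBitSet(64);
    BitSet growable = new BitSet(64);

    fixed.set(3);      growable.set(3);
    fixed.set(40);     growable.set(40);
    fixed.flip(0, 8);  growable.flip(0, 8);  // range ops exclude the upper bound

    System.out.println(fixed.cardinality() == growable.cardinality()); // true
    System.out.println(fixed.nextSetBit(4));   // 4: first set bit at or after index 4
    System.out.println(fixed.prevSetBit(39));  // 7: last set bit at or before index 39
  }
}
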
diff --git a/lucene/backwards/src/test/org/apache/lucene/util/TestIOUtils.java b/lucene/backwards/src/test/org/apache/lucene/util/TestIOUtils.java
new file mode 100644
index 0000000..9371910
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/util/TestIOUtils.java
@@ -0,0 +1,107 @@
+package org.apache.lucene.util;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.io.StringWriter;
+
+public class TestIOUtils extends LuceneTestCase {
+
+  static final class BrokenCloseable implements Closeable {
+    final int i;
+    
+    public BrokenCloseable(int i) {
+      this.i = i;
+    }
+  
+    // Not until Java6: @Override
+    public void close() throws IOException {
+      throw new IOException("TEST-IO-EXCEPTION-" + i);
+    }
+  }
+
+  static final class TestException extends Exception {
+    public TestException() {
+      super("BASE-EXCEPTION");
+    }
+  }
+
+  public void testSuppressedExceptions() {
+    boolean isJava7 = true;
+    try {
+      // this class only exists in Java 7:
+      Class.forName("java.lang.AutoCloseable");
+    } catch (ClassNotFoundException cnfe) {
+      isJava7 = false;
+    }
+    
+    if (!isJava7) {
+      System.err.println("WARNING: TestIOUtils.testSuppressedExceptions: Full test coverage only with Java 7, as suppressed exception recording is not supported before.");
+    }
+    
+    // test with prior exception
+    try {
+      final TestException t = new TestException();
+      IOUtils.closeWhileHandlingException(t, new BrokenCloseable(1), new BrokenCloseable(2));
+    } catch (TestException e1) {
+      assertEquals("BASE-EXCEPTION", e1.getMessage());
+      final StringWriter sw = new StringWriter();
+      final PrintWriter pw = new PrintWriter(sw);
+      e1.printStackTrace(pw);
+      pw.flush();
+      final String trace = sw.toString();
+      if (VERBOSE) {
+        System.out.println("TestIOUtils.testSuppressedExceptions: Thrown Exception stack trace:");
+        System.out.println(trace);
+      }
+      if (isJava7) {
+        assertTrue("Stack trace does not contain first suppressed Exception: " + trace,
+          trace.contains("java.io.IOException: TEST-IO-EXCEPTION-1"));
+        assertTrue("Stack trace does not contain second suppressed Exception: " + trace,
+          trace.contains("java.io.IOException: TEST-IO-EXCEPTION-2"));
+      }
+    } catch (IOException e2) {
+      fail("IOException should not be thrown here");
+    }
+    
+    // test without prior exception
+    try {
+      IOUtils.closeWhileHandlingException((TestException) null, new BrokenCloseable(1), new BrokenCloseable(2));
+    } catch (TestException e1) {
+      fail("TestException should not be thrown here");
+    } catch (IOException e2) {
+      assertEquals("TEST-IO-EXCEPTION-1", e2.getMessage());
+      final StringWriter sw = new StringWriter();
+      final PrintWriter pw = new PrintWriter(sw);
+      e2.printStackTrace(pw);
+      pw.flush();
+      final String trace = sw.toString();
+      if (VERBOSE) {
+        System.out.println("TestIOUtils.testSuppressedExceptions: Thrown Exception stack trace:");
+        System.out.println(trace);
+      }
+      if (isJava7) {
+        assertTrue("Stack trace does not contain suppressed Exception: " + trace,
+          trace.contains("java.io.IOException: TEST-IO-EXCEPTION-2"));
+      }
+    }
+  }
+  
+}
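
The test above exercises IOUtils.closeWhileHandlingException, which rethrows the supplied prior exception if there is one and otherwise the first IOException raised while closing, recording further exceptions as suppressed on Java 7. A minimal sketch of the intended call pattern, assuming that behavior (the readFirstByte helper is hypothetical):

import java.io.FileInputStream;
import java.io.IOException;

import org.apache.lucene.util.IOUtils;

public class CloseSketch {
  // Remember an exception thrown by the body, then close in finally;
  // closeWhileHandlingException decides which exception propagates.
  static void readFirstByte(String path) throws IOException {
    FileInputStream in = new FileInputStream(path);
    IOException prior = null;
    try {
      in.read();
    } catch (IOException e) {
      prior = e;
    } finally {
      IOUtils.closeWhileHandlingException(prior, in);
    }
  }
}
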
diff --git a/lucene/backwards/src/test/org/apache/lucene/util/TestIndexableBinaryStringTools.java b/lucene/backwards/src/test/org/apache/lucene/util/TestIndexableBinaryStringTools.java
new file mode 100644
index 0000000..8aa2688
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/util/TestIndexableBinaryStringTools.java
@@ -0,0 +1,376 @@
+package org.apache.lucene.util;
+
+import org.junit.BeforeClass;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.nio.CharBuffer;
+import java.nio.ByteBuffer;
+
+public class TestIndexableBinaryStringTools extends LuceneTestCase {
+  private static int NUM_RANDOM_TESTS;
+  private static int MAX_RANDOM_BINARY_LENGTH;
+
+  @BeforeClass
+  public static void beforeClass() throws Exception {
+    NUM_RANDOM_TESTS = atLeast(200);
+    MAX_RANDOM_BINARY_LENGTH = atLeast(300);
+  }
+
+  /** @deprecated remove this test for Lucene 4.0 */
+  @Deprecated
+  public void testSingleBinaryRoundTripNIO() {
+    byte[] binary = new byte[] 
+      { (byte)0x23, (byte)0x98, (byte)0x13, (byte)0xE4, (byte)0x76, (byte)0x41,
+        (byte)0xB2, (byte)0xC9, (byte)0x7F, (byte)0x0A, (byte)0xA6, (byte)0xD8 };
+
+    ByteBuffer binaryBuf = ByteBuffer.wrap(binary);
+    CharBuffer encoded = IndexableBinaryStringTools.encode(binaryBuf);
+    ByteBuffer decoded = IndexableBinaryStringTools.decode(encoded);
+    assertEquals("Round trip decode/decode returned different results:"
+                 + System.getProperty("line.separator")
+                 + "original: " + binaryDumpNIO(binaryBuf)
+                 + System.getProperty("line.separator")
+                 + " encoded: " + charArrayDumpNIO(encoded)
+                 + System.getProperty("line.separator")
+                 + " decoded: " + binaryDumpNIO(decoded),
+                 binaryBuf, decoded);
+  }
+  
+  public void testSingleBinaryRoundTrip() {
+    byte[] binary = new byte[] { (byte) 0x23, (byte) 0x98, (byte) 0x13,
+        (byte) 0xE4, (byte) 0x76, (byte) 0x41, (byte) 0xB2, (byte) 0xC9,
+        (byte) 0x7F, (byte) 0x0A, (byte) 0xA6, (byte) 0xD8 };
+
+    int encodedLen = IndexableBinaryStringTools.getEncodedLength(binary, 0,
+        binary.length);
+    char encoded[] = new char[encodedLen];
+    IndexableBinaryStringTools.encode(binary, 0, binary.length, encoded, 0,
+        encoded.length);
+
+    int decodedLen = IndexableBinaryStringTools.getDecodedLength(encoded, 0,
+        encoded.length);
+    byte decoded[] = new byte[decodedLen];
+    IndexableBinaryStringTools.decode(encoded, 0, encoded.length, decoded, 0,
+        decoded.length);
+
+    assertEquals("Round trip decode/decode returned different results:"
+        + System.getProperty("line.separator") + "original: "
+        + binaryDump(binary, binary.length)
+        + System.getProperty("line.separator") + " encoded: "
+        + charArrayDump(encoded, encoded.length)
+        + System.getProperty("line.separator") + " decoded: "
+        + binaryDump(decoded, decoded.length),
+        binaryDump(binary, binary.length), binaryDump(decoded, decoded.length));
+  }
+  
+  /** @deprecated remove this test for Lucene 4.0 */
+  @Deprecated
+  public void testEncodedSortabilityNIO() {
+    byte[] originalArray1 = new byte[MAX_RANDOM_BINARY_LENGTH];
+    ByteBuffer originalBuf1 = ByteBuffer.wrap(originalArray1);
+    char[] originalString1 = new char[MAX_RANDOM_BINARY_LENGTH];
+    CharBuffer originalStringBuf1 = CharBuffer.wrap(originalString1);
+    char[] encoded1 = new char[IndexableBinaryStringTools.getEncodedLength(originalBuf1)];
+    CharBuffer encodedBuf1 = CharBuffer.wrap(encoded1);
+    byte[] original2 = new byte[MAX_RANDOM_BINARY_LENGTH];
+    ByteBuffer originalBuf2 = ByteBuffer.wrap(original2);
+    char[] originalString2 = new char[MAX_RANDOM_BINARY_LENGTH];
+    CharBuffer originalStringBuf2 = CharBuffer.wrap(originalString2);
+    char[] encoded2 = new char[IndexableBinaryStringTools.getEncodedLength(originalBuf2)];
+    CharBuffer encodedBuf2 = CharBuffer.wrap(encoded2);
+    for (int testNum = 0 ; testNum < NUM_RANDOM_TESTS ; ++testNum) {
+      int numBytes1 = random.nextInt(MAX_RANDOM_BINARY_LENGTH - 1) + 1; // Min == 1
+      originalBuf1.limit(numBytes1);
+      originalStringBuf1.limit(numBytes1);
+      
+      for (int byteNum = 0 ; byteNum < numBytes1 ; ++byteNum) {
+        int randomInt = random.nextInt(0x100);
+        originalArray1[byteNum] = (byte) randomInt;
+        originalString1[byteNum] = (char)randomInt;
+      }
+      
+      int numBytes2 = random.nextInt(MAX_RANDOM_BINARY_LENGTH - 1) + 1; // Min == 1
+      originalBuf2.limit(numBytes2);
+      originalStringBuf2.limit(numBytes2);
+      for (int byteNum = 0 ; byteNum < numBytes2 ; ++byteNum) {
+        int randomInt = random.nextInt(0x100);
+        original2[byteNum] = (byte)randomInt;
+        originalString2[byteNum] = (char)randomInt;
+      }
+      int originalComparison = originalStringBuf1.compareTo(originalStringBuf2);
+      originalComparison = originalComparison < 0 ? -1 : originalComparison > 0 ? 1 : 0;
+      
+      IndexableBinaryStringTools.encode(originalBuf1, encodedBuf1);
+      IndexableBinaryStringTools.encode(originalBuf2, encodedBuf2);
+      
+      int encodedComparison = encodedBuf1.compareTo(encodedBuf2);
+      encodedComparison = encodedComparison < 0 ? -1 : encodedComparison > 0 ? 1 : 0;
+      
+      assertEquals("Test #" + (testNum + 1) 
+                   + ": Original bytes and encoded chars compare differently:"
+                   + System.getProperty("line.separator")
+                   + " binary 1: " + binaryDumpNIO(originalBuf1)
+                   + System.getProperty("line.separator")
+                   + " binary 2: " + binaryDumpNIO(originalBuf2)
+                   + System.getProperty("line.separator")
+                   + "encoded 1: " + charArrayDumpNIO(encodedBuf1)
+                   + System.getProperty("line.separator")
+                   + "encoded 2: " + charArrayDumpNIO(encodedBuf2)
+                   + System.getProperty("line.separator"),
+                   originalComparison, encodedComparison);
+    }
+  }
+
+  public void testEncodedSortability() {
+    byte[] originalArray1 = new byte[MAX_RANDOM_BINARY_LENGTH];
+    char[] originalString1 = new char[MAX_RANDOM_BINARY_LENGTH];
+    char[] encoded1 = new char[MAX_RANDOM_BINARY_LENGTH * 10];
+    byte[] original2 = new byte[MAX_RANDOM_BINARY_LENGTH];
+    char[] originalString2 = new char[MAX_RANDOM_BINARY_LENGTH];
+    char[] encoded2 = new char[MAX_RANDOM_BINARY_LENGTH * 10];
+
+    for (int testNum = 0; testNum < NUM_RANDOM_TESTS; ++testNum) {
+      int numBytes1 = random.nextInt(MAX_RANDOM_BINARY_LENGTH - 1) + 1; // Min == 1
+
+      for (int byteNum = 0; byteNum < numBytes1; ++byteNum) {
+        int randomInt = random.nextInt(0x100);
+        originalArray1[byteNum] = (byte) randomInt;
+        originalString1[byteNum] = (char) randomInt;
+      }
+
+      int numBytes2 = random.nextInt(MAX_RANDOM_BINARY_LENGTH - 1) + 1; // Min == 1
+
+      for (int byteNum = 0; byteNum < numBytes2; ++byteNum) {
+        int randomInt = random.nextInt(0x100);
+        original2[byteNum] = (byte) randomInt;
+        originalString2[byteNum] = (char) randomInt;
+      }
+      int originalComparison = new String(originalString1, 0, numBytes1)
+          .compareTo(new String(originalString2, 0, numBytes2));
+      originalComparison = originalComparison < 0 ? -1
+          : originalComparison > 0 ? 1 : 0;
+
+      int encodedLen1 = IndexableBinaryStringTools.getEncodedLength(
+          originalArray1, 0, numBytes1);
+      if (encodedLen1 > encoded1.length)
+        encoded1 = new char[ArrayUtil.oversize(encodedLen1, RamUsageEstimator.NUM_BYTES_CHAR)];
+      IndexableBinaryStringTools.encode(originalArray1, 0, numBytes1, encoded1,
+          0, encodedLen1);
+
+      int encodedLen2 = IndexableBinaryStringTools.getEncodedLength(original2,
+          0, numBytes2);
+      if (encodedLen2 > encoded2.length)
+        encoded2 = new char[ArrayUtil.oversize(encodedLen2, RamUsageEstimator.NUM_BYTES_CHAR)];
+      IndexableBinaryStringTools.encode(original2, 0, numBytes2, encoded2, 0,
+          encodedLen2);
+
+      int encodedComparison = new String(encoded1, 0, encodedLen1)
+          .compareTo(new String(encoded2, 0, encodedLen2));
+      encodedComparison = encodedComparison < 0 ? -1
+          : encodedComparison > 0 ? 1 : 0;
+
+      assertEquals("Test #" + (testNum + 1)
+          + ": Original bytes and encoded chars compare differently:"
+          + System.getProperty("line.separator") + " binary 1: "
+          + binaryDump(originalArray1, numBytes1)
+          + System.getProperty("line.separator") + " binary 2: "
+          + binaryDump(original2, numBytes2)
+          + System.getProperty("line.separator") + "encoded 1: "
+          + charArrayDump(encoded1, encodedLen1)
+          + System.getProperty("line.separator") + "encoded 2: "
+          + charArrayDump(encoded2, encodedLen2)
+          + System.getProperty("line.separator"), originalComparison,
+          encodedComparison);
+    }
+  }
+
+  /** @deprecated remove this test for Lucene 4.0 */
+  @Deprecated
+  public void testEmptyInputNIO() {
+    byte[] binary = new byte[0];
+    CharBuffer encoded = IndexableBinaryStringTools.encode(ByteBuffer.wrap(binary));
+    ByteBuffer decoded = IndexableBinaryStringTools.decode(encoded);
+    assertNotNull("decode() returned null", decoded);
+    assertEquals("decoded empty input was not empty", decoded.limit(), 0);
+  }
+  
+  public void testEmptyInput() {
+    byte[] binary = new byte[0];
+
+    int encodedLen = IndexableBinaryStringTools.getEncodedLength(binary, 0,
+        binary.length);
+    char[] encoded = new char[encodedLen];
+    IndexableBinaryStringTools.encode(binary, 0, binary.length, encoded, 0,
+        encoded.length);
+
+    int decodedLen = IndexableBinaryStringTools.getDecodedLength(encoded, 0,
+        encoded.length);
+    byte[] decoded = new byte[decodedLen];
+    IndexableBinaryStringTools.decode(encoded, 0, encoded.length, decoded, 0,
+        decoded.length);
+
+    assertEquals("decoded empty input was not empty", decoded.length, 0);
+  }
+  
+  /** @deprecated remove this test for Lucene 4.0 */
+  @Deprecated
+  public void testAllNullInputNIO() {
+    byte[] binary = new byte[] { 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+    ByteBuffer binaryBuf = ByteBuffer.wrap(binary);
+    CharBuffer encoded = IndexableBinaryStringTools.encode(binaryBuf);
+    assertNotNull("encode() returned null", encoded);
+    ByteBuffer decodedBuf = IndexableBinaryStringTools.decode(encoded);
+    assertNotNull("decode() returned null", decodedBuf);
+    assertEquals("Round trip decode/decode returned different results:"
+                 + System.getProperty("line.separator")
+                 + "  original: " + binaryDumpNIO(binaryBuf)
+                 + System.getProperty("line.separator")
+                 + "decodedBuf: " + binaryDumpNIO(decodedBuf),
+                 binaryBuf, decodedBuf);
+  }
+  
+  public void testAllNullInput() {
+    byte[] binary = new byte[] { 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+
+    int encodedLen = IndexableBinaryStringTools.getEncodedLength(binary, 0,
+        binary.length);
+    char encoded[] = new char[encodedLen];
+    IndexableBinaryStringTools.encode(binary, 0, binary.length, encoded, 0,
+        encoded.length);
+
+    int decodedLen = IndexableBinaryStringTools.getDecodedLength(encoded, 0,
+        encoded.length);
+    byte[] decoded = new byte[decodedLen];
+    IndexableBinaryStringTools.decode(encoded, 0, encoded.length, decoded, 0,
+        decoded.length);
+
+    assertEquals("Round trip decode/decode returned different results:"
+        + System.getProperty("line.separator") + "  original: "
+        + binaryDump(binary, binary.length)
+        + System.getProperty("line.separator") + "decodedBuf: "
+        + binaryDump(decoded, decoded.length),
+        binaryDump(binary, binary.length), binaryDump(decoded, decoded.length));
+  }
+  
+  /** @deprecated remove this test for Lucene 4.0 */
+  @Deprecated
+  public void testRandomBinaryRoundTripNIO() {
+    byte[] binary = new byte[MAX_RANDOM_BINARY_LENGTH];
+    ByteBuffer binaryBuf = ByteBuffer.wrap(binary);
+    char[] encoded = new char[IndexableBinaryStringTools.getEncodedLength(binaryBuf)];
+    CharBuffer encodedBuf = CharBuffer.wrap(encoded);
+    byte[] decoded = new byte[MAX_RANDOM_BINARY_LENGTH];
+    ByteBuffer decodedBuf = ByteBuffer.wrap(decoded);
+    for (int testNum = 0 ; testNum < NUM_RANDOM_TESTS ; ++testNum) {
+      int numBytes = random.nextInt(MAX_RANDOM_BINARY_LENGTH - 1) + 1 ; // Min == 1
+      binaryBuf.limit(numBytes);
+      for (int byteNum = 0 ; byteNum < numBytes ; ++byteNum) {
+        binary[byteNum] = (byte)random.nextInt(0x100);
+      }
+      IndexableBinaryStringTools.encode(binaryBuf, encodedBuf);
+      IndexableBinaryStringTools.decode(encodedBuf, decodedBuf);
+      assertEquals("Test #" + (testNum + 1) 
+                   + ": Round trip decode/decode returned different results:"
+                   + System.getProperty("line.separator")
+                   + "  original: " + binaryDumpNIO(binaryBuf)
+                   + System.getProperty("line.separator")
+                   + "encodedBuf: " + charArrayDumpNIO(encodedBuf)
+                   + System.getProperty("line.separator")
+                   + "decodedBuf: " + binaryDumpNIO(decodedBuf),
+                   binaryBuf, decodedBuf);
+    }
+  }
+
+  public void testRandomBinaryRoundTrip() {
+    byte[] binary = new byte[MAX_RANDOM_BINARY_LENGTH];
+    char[] encoded = new char[MAX_RANDOM_BINARY_LENGTH * 10];
+    byte[] decoded = new byte[MAX_RANDOM_BINARY_LENGTH];
+    for (int testNum = 0; testNum < NUM_RANDOM_TESTS; ++testNum) {
+      int numBytes = random.nextInt(MAX_RANDOM_BINARY_LENGTH - 1) + 1; // Min == 1                                                                   
+
+      for (int byteNum = 0; byteNum < numBytes; ++byteNum) {
+        binary[byteNum] = (byte) random.nextInt(0x100);
+      }
+
+      int encodedLen = IndexableBinaryStringTools.getEncodedLength(binary, 0,
+          numBytes);
+      if (encoded.length < encodedLen)
+        encoded = new char[ArrayUtil.oversize(encodedLen, RamUsageEstimator.NUM_BYTES_CHAR)];
+      IndexableBinaryStringTools.encode(binary, 0, numBytes, encoded, 0,
+          encodedLen);
+
+      int decodedLen = IndexableBinaryStringTools.getDecodedLength(encoded, 0,
+          encodedLen);
+      IndexableBinaryStringTools.decode(encoded, 0, encodedLen, decoded, 0,
+          decodedLen);
+
+      assertEquals("Test #" + (testNum + 1)
+          + ": Round trip decode/decode returned different results:"
+          + System.getProperty("line.separator") + "  original: "
+          + binaryDump(binary, numBytes) + System.getProperty("line.separator")
+          + "encodedBuf: " + charArrayDump(encoded, encodedLen)
+          + System.getProperty("line.separator") + "decodedBuf: "
+          + binaryDump(decoded, decodedLen), binaryDump(binary, numBytes),
+          binaryDump(decoded, decodedLen));
+    }
+  }
+  
+  /** @deprecated remove this method for Lucene 4.0 */
+  @Deprecated
+  public String binaryDumpNIO(ByteBuffer binaryBuf) {
+    return binaryDump(binaryBuf.array(), 
+        binaryBuf.limit() - binaryBuf.arrayOffset());
+  }
+
+  public String binaryDump(byte[] binary, int numBytes) {
+    StringBuilder buf = new StringBuilder();
+    for (int byteNum = 0 ; byteNum < numBytes ; ++byteNum) {
+      String hex = Integer.toHexString(binary[byteNum] & 0xFF);
+      if (hex.length() == 1) {
+        buf.append('0');
+      }
+      buf.append(hex.toUpperCase());
+      if (byteNum < numBytes - 1) {
+        buf.append(' ');
+      }
+    }
+    return buf.toString();
+  }
+  /** @deprecated remove this method for Lucene 4.0 */
+  @Deprecated
+  public String charArrayDumpNIO(CharBuffer charBuf) {
+    return charArrayDump(charBuf.array(), 
+        charBuf.limit() - charBuf.arrayOffset());
+  }
+  
+  public String charArrayDump(char[] charArray, int numBytes) {
+    StringBuilder buf = new StringBuilder();
+    for (int charNum = 0 ; charNum < numBytes ; ++charNum) {
+      String hex = Integer.toHexString(charArray[charNum]);
+      for (int digit = 0 ; digit < 4 - hex.length() ; ++digit) {
+        buf.append('0');
+      }
+      buf.append(hex.toUpperCase());
+      if (charNum < numBytes - 1) {
+        buf.append(' ');
+      }
+    }
+    return buf.toString();
+  }
+}
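
The non-NIO tests above all follow the same four-step round trip: size the output with getEncodedLength/getDecodedLength, then call encode and decode with explicit offsets and lengths. A minimal sketch of that round trip, using only the calls shown in the test:

import java.util.Arrays;

import org.apache.lucene.util.IndexableBinaryStringTools;

public class BinaryRoundTripSketch {
  public static void main(String[] args) {
    byte[] original = { 0x12, 0x34, 0x56, 0x78, (byte) 0x9A };

    // Size and fill the char encoding of the raw bytes.
    int encLen = IndexableBinaryStringTools.getEncodedLength(original, 0, original.length);
    char[] encoded = new char[encLen];
    IndexableBinaryStringTools.encode(original, 0, original.length, encoded, 0, encLen);

    // Size and fill the decoded bytes; the round trip must be lossless.
    int decLen = IndexableBinaryStringTools.getDecodedLength(encoded, 0, encLen);
    byte[] decoded = new byte[decLen];
    IndexableBinaryStringTools.decode(encoded, 0, encLen, decoded, 0, decLen);

    System.out.println(Arrays.equals(original, decoded)); // true
  }
}
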
diff --git a/lucene/backwards/src/test/org/apache/lucene/util/TestNumericUtils.java b/lucene/backwards/src/test/org/apache/lucene/util/TestNumericUtils.java
new file mode 100644
index 0000000..6d250c8
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/util/TestNumericUtils.java
@@ -0,0 +1,522 @@
+package org.apache.lucene.util;
+
+/**
+* Licensed to the Apache Software Foundation (ASF) under one or more
+* contributor license agreements.  See the NOTICE file distributed with
+* this work for additional information regarding copyright ownership.
+* The ASF licenses this file to You under the Apache License, Version 2.0
+* (the "License"); you may not use this file except in compliance with
+* the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.Random;
+
+public class TestNumericUtils extends LuceneTestCase {
+
+  public void testLongConversionAndOrdering() throws Exception {
+    // generate a series of encoded longs, each numerically one bigger than the one before
+    String last=null;
+    for (long l=-100000L; l<100000L; l++) {
+      String act=NumericUtils.longToPrefixCoded(l);
+      if (last!=null) {
+        // test if smaller
+        assertTrue("actual bigger than last", last.compareTo(act) < 0 );
+      }
+      // test if forward and back conversion works
+      assertEquals("forward and back conversion should generate same long", l, NumericUtils.prefixCodedToLong(act));
+      // next step
+      last=act;
+    }
+  }
+
+  public void testIntConversionAndOrdering() throws Exception {
+    // generate a series of encoded ints, each numerically one bigger than the one before
+    String last=null;
+    for (int i=-100000; i<100000; i++) {
+      String act=NumericUtils.intToPrefixCoded(i);
+      if (last!=null) {
+        // test if smaller
+        assertTrue("actual bigger than last", last.compareTo(act) < 0 );
+      }
+      // test if forward and back conversion works
+      assertEquals("forward and back conversion should generate same int", i, NumericUtils.prefixCodedToInt(act));
+      // next step
+      last=act;
+    }
+  }
+
+  public void testLongSpecialValues() throws Exception {
+    long[] vals=new long[]{
+      Long.MIN_VALUE, Long.MIN_VALUE+1, Long.MIN_VALUE+2, -5003400000000L,
+      -4000L, -3000L, -2000L, -1000L, -1L, 0L, 1L, 10L, 300L, 50006789999999999L, Long.MAX_VALUE-2, Long.MAX_VALUE-1, Long.MAX_VALUE
+    };
+    String[] prefixVals=new String[vals.length];
+    
+    for (int i=0; i<vals.length; i++) {
+      prefixVals[i]=NumericUtils.longToPrefixCoded(vals[i]);
+      
+      // check forward and back conversion
+      assertEquals( "forward and back conversion should generate same long", vals[i], NumericUtils.prefixCodedToLong(prefixVals[i]) );
+
+      // test if decoding values as int fails correctly
+      try {
+        NumericUtils.prefixCodedToInt(prefixVals[i]);
+        fail("decoding a prefix coded long value as int should fail");
+      } catch (NumberFormatException e) {
+        // worked
+      }
+    }
+    
+    // check sort order (prefixVals should be ascending)
+    for (int i=1; i<prefixVals.length; i++) {
+      assertTrue( "check sort order", prefixVals[i-1].compareTo( prefixVals[i] ) < 0 );
+    }
+        
+    // check the prefix encoding: a lower-precision value should differ from the original by exactly the removed low-order bits
+    for (int i=0; i<vals.length; i++) {
+      for (int j=0; j<64; j++) {
+        long prefixVal=NumericUtils.prefixCodedToLong(NumericUtils.longToPrefixCoded(vals[i], j));
+        long mask=(1L << j) - 1L;
+        assertEquals( "difference between prefix val and original value for "+vals[i]+" with shift="+j, vals[i] & mask, vals[i]-prefixVal );
+      }
+    }
+  }
+
+  public void testIntSpecialValues() throws Exception {
+    int[] vals=new int[]{
+      Integer.MIN_VALUE, Integer.MIN_VALUE+1, Integer.MIN_VALUE+2, -64765767,
+      -4000, -3000, -2000, -1000, -1, 0, 1, 10, 300, 765878989, Integer.MAX_VALUE-2, Integer.MAX_VALUE-1, Integer.MAX_VALUE
+    };
+    String[] prefixVals=new String[vals.length];
+    
+    for (int i=0; i<vals.length; i++) {
+      prefixVals[i]=NumericUtils.intToPrefixCoded(vals[i]);
+      
+      // check forward and back conversion
+      assertEquals( "forward and back conversion should generate same int", vals[i], NumericUtils.prefixCodedToInt(prefixVals[i]) );
+      
+      // test if decoding values as long fails correctly
+      try {
+        NumericUtils.prefixCodedToLong(prefixVals[i]);
+        fail("decoding a prefix coded int value as long should fail");
+      } catch (NumberFormatException e) {
+        // worked
+      }
+    }
+    
+    // check sort order (prefixVals should be ascending)
+    for (int i=1; i<prefixVals.length; i++) {
+      assertTrue( "check sort order", prefixVals[i-1].compareTo( prefixVals[i] ) < 0 );
+    }
+    
+    // check the prefix encoding: a lower-precision value should differ from the original by exactly the removed low-order bits
+    for (int i=0; i<vals.length; i++) {
+      for (int j=0; j<32; j++) {
+        int prefixVal=NumericUtils.prefixCodedToInt(NumericUtils.intToPrefixCoded(vals[i], j));
+        int mask=(1 << j) - 1;
+        assertEquals( "difference between prefix val and original value for "+vals[i]+" with shift="+j, vals[i] & mask, vals[i]-prefixVal );
+      }
+    }
+  }
+
+  public void testDoubles() throws Exception {
+    double[] vals=new double[]{
+      Double.NEGATIVE_INFINITY, -2.3E25, -1.0E15, -1.0, -1.0E-1, -1.0E-2, -0.0, 
+      +0.0, 1.0E-2, 1.0E-1, 1.0, 1.0E15, 2.3E25, Double.POSITIVE_INFINITY
+    };
+    long[] longVals=new long[vals.length];
+    
+    // check forward and back conversion
+    for (int i=0; i<vals.length; i++) {
+      longVals[i]=NumericUtils.doubleToSortableLong(vals[i]);
+      assertTrue( "forward and back conversion should generate same double", Double.compare(vals[i], NumericUtils.sortableLongToDouble(longVals[i]))==0 );
+    }
+    
+    // check sort order (prefixVals should be ascending)
+    for (int i=1; i<longVals.length; i++) {
+      assertTrue( "check sort order", longVals[i-1] < longVals[i] );
+    }
+  }
+
+  public void testFloats() throws Exception {
+    float[] vals=new float[]{
+      Float.NEGATIVE_INFINITY, -2.3E25f, -1.0E15f, -1.0f, -1.0E-1f, -1.0E-2f, -0.0f, 
+      +0.0f, 1.0E-2f, 1.0E-1f, 1.0f, 1.0E15f, 2.3E25f, Float.POSITIVE_INFINITY
+    };
+    int[] intVals=new int[vals.length];
+    
+    // check forward and back conversion
+    for (int i=0; i<vals.length; i++) {
+      intVals[i]=NumericUtils.floatToSortableInt(vals[i]);
+      assertTrue( "forward and back conversion should generate same double", Float.compare(vals[i], NumericUtils.sortableIntToFloat(intVals[i]))==0 );
+    }
+    
+    // check sort order (prefixVals should be ascending)
+    for (int i=1; i<intVals.length; i++) {
+      assertTrue( "check sort order", intVals[i-1] < intVals[i] );
+    }
+  }
+  
+  // INFO: Tests for trieCodeLong()/trieCodeInt() not needed because they are implicitly tested by the range filter tests
+  
+  /** Note: The neededBounds Iterable must be unsigned (for easier understanding of what's happening) */
+  private void assertLongRangeSplit(final long lower, final long upper, int precisionStep,
+    final boolean useBitSet, final Iterable<Long> expectedBounds, final Iterable<Integer> expectedShifts
+  ) throws Exception {
+    // Cannot use FixedBitSet since the range could be long:
+    final OpenBitSet bits=useBitSet ? new OpenBitSet(upper-lower+1) : null;
+    final Iterator<Long> neededBounds = (expectedBounds == null) ? null : expectedBounds.iterator();
+    final Iterator<Integer> neededShifts = (expectedShifts == null) ? null : expectedShifts.iterator();
+
+    NumericUtils.splitLongRange(new NumericUtils.LongRangeBuilder() {
+      @Override
+      public void addRange(long min, long max, int shift) {
+        assertTrue("min, max should be inside bounds", min>=lower && min<=upper && max>=lower && max<=upper);
+        if (useBitSet) for (long l=min; l<=max; l++) {
+          assertFalse("ranges should not overlap", bits.getAndSet(l-lower) );
+          // extra exit condition to prevent overflow on MAX_VALUE
+          if (l == max) break;
+        }
+        if (neededBounds == null || neededShifts == null)
+          return;
+        // make unsigned longs for easier display and understanding
+        min ^= 0x8000000000000000L;
+        max ^= 0x8000000000000000L;
+        //System.out.println("0x"+Long.toHexString(min>>>shift)+"L,0x"+Long.toHexString(max>>>shift)+"L)/*shift="+shift+"*/,");
+        assertEquals( "shift", neededShifts.next().intValue(), shift);
+        assertEquals( "inner min bound", neededBounds.next().longValue(), min>>>shift);
+        assertEquals( "inner max bound", neededBounds.next().longValue(), max>>>shift);
+      }
+    }, precisionStep, lower, upper);
+    
+    if (useBitSet) {
+      // after flipping all bits in the range, the cardinality should be zero
+      bits.flip(0,upper-lower+1);
+      assertEquals("The sub-range concenated should match the whole range", 0, bits.cardinality());
+    }
+  }
+  
+  /** LUCENE-2541: NumericRangeQuery errors with endpoints near long min and max values */
+  public void testLongExtremeValues() throws Exception {
+    // upper end extremes
+    assertLongRangeSplit(Long.MAX_VALUE, Long.MAX_VALUE, 1, true, Arrays.asList(
+      0xffffffffffffffffL,0xffffffffffffffffL
+    ), Arrays.asList(
+      0
+    ));
+    assertLongRangeSplit(Long.MAX_VALUE, Long.MAX_VALUE, 2, true, Arrays.asList(
+      0xffffffffffffffffL,0xffffffffffffffffL
+    ), Arrays.asList(
+      0
+    ));
+    assertLongRangeSplit(Long.MAX_VALUE, Long.MAX_VALUE, 4, true, Arrays.asList(
+      0xffffffffffffffffL,0xffffffffffffffffL
+    ), Arrays.asList(
+      0
+    ));
+    assertLongRangeSplit(Long.MAX_VALUE, Long.MAX_VALUE, 6, true, Arrays.asList(
+      0xffffffffffffffffL,0xffffffffffffffffL
+    ), Arrays.asList(
+      0
+    ));
+    assertLongRangeSplit(Long.MAX_VALUE, Long.MAX_VALUE, 8, true, Arrays.asList(
+      0xffffffffffffffffL,0xffffffffffffffffL
+    ), Arrays.asList(
+      0
+    ));
+    assertLongRangeSplit(Long.MAX_VALUE, Long.MAX_VALUE, 64, true, Arrays.asList(
+      0xffffffffffffffffL,0xffffffffffffffffL
+    ), Arrays.asList(
+      0
+    ));
+
+    assertLongRangeSplit(Long.MAX_VALUE-0xfL, Long.MAX_VALUE, 4, true, Arrays.asList(
+      0xfffffffffffffffL,0xfffffffffffffffL
+    ), Arrays.asList(
+      4
+    ));
+    assertLongRangeSplit(Long.MAX_VALUE-0x10L, Long.MAX_VALUE, 4, true, Arrays.asList(
+      0xffffffffffffffefL,0xffffffffffffffefL,
+      0xfffffffffffffffL,0xfffffffffffffffL
+    ), Arrays.asList(
+      0, 4
+    ));
+
+    // lower end extremes
+    assertLongRangeSplit(Long.MIN_VALUE, Long.MIN_VALUE, 1, true, Arrays.asList(
+      0x0000000000000000L,0x0000000000000000L
+    ), Arrays.asList(
+      0
+    ));
+    assertLongRangeSplit(Long.MIN_VALUE, Long.MIN_VALUE, 2, true, Arrays.asList(
+      0x0000000000000000L,0x0000000000000000L
+    ), Arrays.asList(
+      0
+    ));
+    assertLongRangeSplit(Long.MIN_VALUE, Long.MIN_VALUE, 4, true, Arrays.asList(
+      0x0000000000000000L,0x0000000000000000L
+    ), Arrays.asList(
+      0
+    ));
+    assertLongRangeSplit(Long.MIN_VALUE, Long.MIN_VALUE, 6, true, Arrays.asList(
+      0x0000000000000000L,0x0000000000000000L
+    ), Arrays.asList(
+      0
+    ));
+    assertLongRangeSplit(Long.MIN_VALUE, Long.MIN_VALUE, 8, true, Arrays.asList(
+      0x0000000000000000L,0x0000000000000000L
+    ), Arrays.asList(
+      0
+    ));
+    assertLongRangeSplit(Long.MIN_VALUE, Long.MIN_VALUE, 64, true, Arrays.asList(
+      0x0000000000000000L,0x0000000000000000L
+    ), Arrays.asList(
+      0
+    ));
+
+    assertLongRangeSplit(Long.MIN_VALUE, Long.MIN_VALUE+0xfL, 4, true, Arrays.asList(
+      0x000000000000000L,0x000000000000000L
+    ), Arrays.asList(
+      4
+    ));
+    assertLongRangeSplit(Long.MIN_VALUE, Long.MIN_VALUE+0x10L, 4, true, Arrays.asList(
+      0x0000000000000010L,0x0000000000000010L,
+      0x000000000000000L,0x000000000000000L
+    ), Arrays.asList(
+      0, 4
+    ));
+  }
+  
+  public void testRandomSplit() throws Exception {
+    long num = (long) atLeast(10);
+    for (long i=0; i < num; i++) {
+      executeOneRandomSplit(random);
+    }
+  }
+  
+  private void executeOneRandomSplit(final Random random) throws Exception {
+    long lower = randomLong(random);
+    long len = random.nextInt(16384*1024); // not too large bitsets, else OOME!
+    while (lower + len < lower) { // overflow
+      lower >>= 1;
+    }
+    assertLongRangeSplit(lower, lower + len, random.nextInt(64) + 1, true, null, null);
+  }
+  
+  private long randomLong(final Random random) {
+    long val;
+    switch(random.nextInt(4)) {
+      case 0:
+        val = 1L << (random.nextInt(63)); //  patterns like 0x000000100000 (-1 yields patterns like 0x0000fff)
+        break;
+      case 1:
+        val = -1L << (random.nextInt(63)); // patterns like 0xfffff00000
+        break;
+      default:
+        val = random.nextLong();
+    }
+
+    val += random.nextInt(5)-2;
+
+    if (random.nextBoolean()) {
+      if (random.nextBoolean()) val += random.nextInt(100)-50;
+      if (random.nextBoolean()) val = ~val;
+      if (random.nextBoolean()) val = val<<1;
+      if (random.nextBoolean()) val = val>>>1;
+    }
+
+    return val;
+  }
+  
+  public void testSplitLongRange() throws Exception {
+    // a hard-coded "standard" range
+    assertLongRangeSplit(-5000L, 9500L, 4, true, Arrays.asList(
+      0x7fffffffffffec78L,0x7fffffffffffec7fL,
+      0x8000000000002510L,0x800000000000251cL,
+      0x7fffffffffffec8L, 0x7fffffffffffecfL,
+      0x800000000000250L, 0x800000000000250L,
+      0x7fffffffffffedL,  0x7fffffffffffefL,
+      0x80000000000020L,  0x80000000000024L,
+      0x7ffffffffffffL,   0x8000000000001L
+    ), Arrays.asList(
+      0, 0,
+      4, 4,
+      8, 8,
+      12
+    ));
+    
+    // the same with no range splitting
+    assertLongRangeSplit(-5000L, 9500L, 64, true, Arrays.asList(
+      0x7fffffffffffec78L,0x800000000000251cL
+    ), Arrays.asList(
+      0
+    ));
+    
+    // this tests optimized range splitting, if one of the inner bounds
+    // is also the bound of the next lower precision, it should be used completely
+    assertLongRangeSplit(0L, 1024L+63L, 4, true, Arrays.asList(
+      0x800000000000040L, 0x800000000000043L,
+      0x80000000000000L,  0x80000000000003L
+    ), Arrays.asList(
+      4, 8
+    ));
+    
+    // the full long range should only consist of a lowest precision range; no bitset testing here, as too much memory needed :-)
+    assertLongRangeSplit(Long.MIN_VALUE, Long.MAX_VALUE, 8, false, Arrays.asList(
+      0x00L,0xffL
+    ), Arrays.asList(
+      56
+    ));
+
+    // the same with precisionStep=4
+    assertLongRangeSplit(Long.MIN_VALUE, Long.MAX_VALUE, 4, false, Arrays.asList(
+      0x0L,0xfL
+    ), Arrays.asList(
+      60
+    ));
+
+    // the same with precisionStep=2
+    assertLongRangeSplit(Long.MIN_VALUE, Long.MAX_VALUE, 2, false, Arrays.asList(
+      0x0L,0x3L
+    ), Arrays.asList(
+      62
+    ));
+
+    // the same with precisionStep=1
+    assertLongRangeSplit(Long.MIN_VALUE, Long.MAX_VALUE, 1, false, Arrays.asList(
+      0x0L,0x1L
+    ), Arrays.asList(
+      63
+    ));
+
+    // an inverse range should produce no sub-ranges
+    assertLongRangeSplit(9500L, -5000L, 4, false, Collections.<Long>emptyList(), Collections.<Integer>emptyList());    
+
+    // a 0-length range should reproduce the range itself
+    assertLongRangeSplit(9500L, 9500L, 4, false, Arrays.asList(
+      0x800000000000251cL,0x800000000000251cL
+    ), Arrays.asList(
+      0
+    ));
+  }
+
+  /** Note: The neededBounds Iterable must be unsigned (for easier understanding of what's happening) */
+  private void assertIntRangeSplit(final int lower, final int upper, int precisionStep,
+    final boolean useBitSet, final Iterable<Integer> expectedBounds, final Iterable<Integer> expectedShifts
+  ) throws Exception {
+    final FixedBitSet bits=useBitSet ? new FixedBitSet(upper-lower+1) : null;
+    final Iterator<Integer> neededBounds = (expectedBounds == null) ? null : expectedBounds.iterator();
+    final Iterator<Integer> neededShifts = (expectedShifts == null) ? null : expectedShifts.iterator();
+    
+    NumericUtils.splitIntRange(new NumericUtils.IntRangeBuilder() {
+      @Override
+      public void addRange(int min, int max, int shift) {
+        assertTrue("min, max should be inside bounds", min>=lower && min<=upper && max>=lower && max<=upper);
+        if (useBitSet) for (int i=min; i<=max; i++) {
+          assertFalse("ranges should not overlap", bits.getAndSet(i-lower) );
+          // extra exit condition to prevent overflow on MAX_VALUE
+          if (i == max) break;
+        }
+        if (neededBounds == null)
+          return;
+        // make unsigned ints for easier display and understanding
+        min ^= 0x80000000;
+        max ^= 0x80000000;
+        //System.out.println("0x"+Integer.toHexString(min>>>shift)+",0x"+Integer.toHexString(max>>>shift)+")/*shift="+shift+"*/,");
+        assertEquals( "shift", neededShifts.next().intValue(), shift);
+        assertEquals( "inner min bound", neededBounds.next().intValue(), min>>>shift);
+        assertEquals( "inner max bound", neededBounds.next().intValue(), max>>>shift);
+      }
+    }, precisionStep, lower, upper);
+    
+    if (useBitSet) {
+      // after flipping all bits in the range, the cardinality should be zero
+      bits.flip(0, upper-lower+1);
+      assertEquals("The sub-range concenated should match the whole range", 0, bits.cardinality());
+    }
+  }
+  
+  public void testSplitIntRange() throws Exception {
+    // a hard-coded "standard" range
+    assertIntRangeSplit(-5000, 9500, 4, true, Arrays.asList(
+      0x7fffec78,0x7fffec7f,
+      0x80002510,0x8000251c,
+      0x7fffec8, 0x7fffecf,
+      0x8000250, 0x8000250,
+      0x7fffed,  0x7fffef,
+      0x800020,  0x800024,
+      0x7ffff,   0x80001
+    ), Arrays.asList(
+      0, 0,
+      4, 4,
+      8, 8,
+      12
+    ));
+    
+    // the same with no range splitting
+    assertIntRangeSplit(-5000, 9500, 32, true, Arrays.asList(
+      0x7fffec78,0x8000251c
+    ), Arrays.asList(
+      0
+    ));
+    
+    // this tests optimized range splitting: if one of the inner bounds
+    // is also the bound of the next lower precision, it should be used completely
+    assertIntRangeSplit(0, 1024+63, 4, true, Arrays.asList(
+      0x8000040, 0x8000043,
+      0x800000,  0x800003
+    ), Arrays.asList(
+      4, 8
+    ));
+    
+    // the full int range should only consist of a lowest precision range; no bitset testing here, as too much memory would be needed :-)
+    assertIntRangeSplit(Integer.MIN_VALUE, Integer.MAX_VALUE, 8, false, Arrays.asList(
+      0x00,0xff
+    ), Arrays.asList(
+      24
+    ));
+
+    // the same with precisionStep=4
+    assertIntRangeSplit(Integer.MIN_VALUE, Integer.MAX_VALUE, 4, false, Arrays.asList(
+      0x0,0xf
+    ), Arrays.asList(
+      28
+    ));
+
+    // the same with precisionStep=2
+    assertIntRangeSplit(Integer.MIN_VALUE, Integer.MAX_VALUE, 2, false, Arrays.asList(
+      0x0,0x3
+    ), Arrays.asList(
+      30
+    ));
+
+    // the same with precisionStep=1
+    assertIntRangeSplit(Integer.MIN_VALUE, Integer.MAX_VALUE, 1, false, Arrays.asList(
+      0x0,0x1
+    ), Arrays.asList(
+      31
+    ));
+
+    // an inverse range should produce no sub-ranges
+    assertIntRangeSplit(9500, -5000, 4, false, Collections.<Integer>emptyList(), Collections.<Integer>emptyList());    
+
+    // a 0-length range should reproduce the range itself
+    assertIntRangeSplit(9500, 9500, 4, false, Arrays.asList(
+      0x8000251c,0x8000251c
+    ), Arrays.asList(
+      0
+    ));
+  }
+
+}
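The splitLongRange/splitIntRange tests above exercise NumericUtils' trie-range decomposition: a numeric range is broken into sub-ranges at increasing shift levels, so that a range query only needs a handful of low-precision terms per level. A minimal sketch of driving the same callback outside the test harness, assuming the Lucene 3.x LongRangeBuilder variant with addRange(long, long, int), analogous to the IntRangeBuilder overridden in assertIntRangeSplit above:

    import org.apache.lucene.util.NumericUtils;

    public class SplitRangeDemo {
      public static void main(String[] args) {
        // Prints the sub-ranges produced for [-5000, 9500] with precisionStep=4,
        // mirroring the hard-coded expectations in testSplitLongRange above.
        NumericUtils.splitLongRange(new NumericUtils.LongRangeBuilder() {
          @Override
          public void addRange(long min, long max, int shift) {
            System.out.println("shift=" + shift + " min=" + min + " max=" + max);
          }
        }, 4, -5000L, 9500L);
      }
    }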
diff --git a/lucene/backwards/src/test/org/apache/lucene/util/TestOpenBitSet.java b/lucene/backwards/src/test/org/apache/lucene/util/TestOpenBitSet.java
new file mode 100644
index 0000000..b487a58
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/util/TestOpenBitSet.java
@@ -0,0 +1,326 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.lucene.util;
+
+import java.util.BitSet;
+
+import org.apache.lucene.search.DocIdSetIterator;
+
+public class TestOpenBitSet extends LuceneTestCase {
+
+  void doGet(BitSet a, OpenBitSet b) {
+    int max = a.size();
+    for (int i=0; i<max; i++) {
+      if (a.get(i) != b.get(i)) {
+        fail("mismatch: BitSet=["+i+"]="+a.get(i));
+      }
+      if (a.get(i) != b.get((long) i)) {
+        fail("mismatch: BitSet=["+i+"]="+a.get(i));
+      }
+    }
+  }
+
+  void doGetFast(BitSet a, OpenBitSet b, int max) {
+    for (int i=0; i<max; i++) {
+      if (a.get(i) != b.fastGet(i)) {
+        fail("mismatch: BitSet=["+i+"]="+a.get(i));
+      }
+      if (a.get(i) != b.fastGet((long) i)) {
+        fail("mismatch: BitSet=["+i+"]="+a.get(i));
+      }
+    }
+  }
+
+  void doNextSetBit(BitSet a, OpenBitSet b) {
+    int aa=-1,bb=-1;
+    do {
+      aa = a.nextSetBit(aa+1);
+      bb = b.nextSetBit(bb+1);
+      assertEquals(aa,bb);
+    } while (aa>=0);
+  }
+
+  void doNextSetBitLong(BitSet a, OpenBitSet b) {
+    int aa=-1,bb=-1;
+    do {
+      aa = a.nextSetBit(aa+1);
+      bb = (int) b.nextSetBit((long) (bb+1));
+      assertEquals(aa,bb);
+    } while (aa>=0);
+  }
+
+  void doPrevSetBit(BitSet a, OpenBitSet b) {
+    int aa = a.size() + random.nextInt(100);
+    int bb = aa;
+    do {
+      // aa = a.prevSetBit(aa-1);
+      aa--;
+      while ((aa >= 0) && (! a.get(aa))) {
+      	aa--;
+      }
+      bb = b.prevSetBit(bb-1);
+      assertEquals(aa,bb);
+    } while (aa>=0);
+  }
+
+  void doPrevSetBitLong(BitSet a, OpenBitSet b) {
+    int aa = a.size() + random.nextInt(100);
+    int bb = aa;
+    do {
+      // aa = a.prevSetBit(aa-1);
+      aa--;
+      while ((aa >= 0) && (! a.get(aa))) {
+      	aa--;
+      }
+      bb = (int) b.prevSetBit((long) (bb-1));
+      assertEquals(aa,bb);
+    } while (aa>=0);
+  }
+
+  // test interleaving different OpenBitSetIterator.next()/skipTo()
+  void doIterate(BitSet a, OpenBitSet b, int mode) {
+    if (mode==1) doIterate1(a, b);
+    if (mode==2) doIterate2(a, b);
+  }
+
+  void doIterate1(BitSet a, OpenBitSet b) {
+    int aa=-1,bb=-1;
+    OpenBitSetIterator iterator = new OpenBitSetIterator(b);
+    do {
+      aa = a.nextSetBit(aa+1);
+      bb = random.nextBoolean() ? iterator.nextDoc() : iterator.advance(bb + 1);
+      assertEquals(aa == -1 ? DocIdSetIterator.NO_MORE_DOCS : aa, bb);
+    } while (aa>=0);
+  }
+
+  void doIterate2(BitSet a, OpenBitSet b) {
+    int aa=-1,bb=-1;
+    OpenBitSetIterator iterator = new OpenBitSetIterator(b);
+    do {
+      aa = a.nextSetBit(aa+1);
+      bb = random.nextBoolean() ? iterator.nextDoc() : iterator.advance(bb + 1);
+      assertEquals(aa == -1 ? DocIdSetIterator.NO_MORE_DOCS : aa, bb);
+    } while (aa>=0);
+  }
+
+  void doRandomSets(int maxSize, int iter, int mode) {
+    BitSet a0=null;
+    OpenBitSet b0=null;
+
+    for (int i=0; i<iter; i++) {
+      int sz = random.nextInt(maxSize);
+      BitSet a = new BitSet(sz);
+      OpenBitSet b = new OpenBitSet(sz);
+
+      // test the various ways of setting bits
+      if (sz>0) {
+        int nOper = random.nextInt(sz);
+        for (int j=0; j<nOper; j++) {
+          int idx;         
+
+          idx = random.nextInt(sz);
+          a.set(idx);
+          b.fastSet(idx);
+          
+          idx = random.nextInt(sz);
+          a.set(idx);
+          b.fastSet((long) idx);
+          
+          idx = random.nextInt(sz);
+          a.clear(idx);
+          b.fastClear(idx);
+          
+          idx = random.nextInt(sz);
+          a.clear(idx);
+          b.fastClear((long) idx);
+          
+          idx = random.nextInt(sz);
+          a.flip(idx);
+          b.fastFlip(idx);
+
+          boolean val = b.flipAndGet(idx);
+          boolean val2 = b.flipAndGet(idx);
+          assertTrue(val != val2);
+
+          idx = random.nextInt(sz);
+          a.flip(idx);
+          b.fastFlip((long) idx);
+
+          val = b.flipAndGet((long) idx);
+          val2 = b.flipAndGet((long) idx);
+          assertTrue(val != val2);
+
+          val = b.getAndSet(idx);
+          assertTrue(val2 == val);
+          assertTrue(b.get(idx));
+          
+          if (!val) b.fastClear(idx);
+          assertTrue(b.get(idx) == val);
+        }
+      }
+
+      // test that the various ways of accessing the bits are equivalent
+      doGet(a,b);
+      doGetFast(a, b, sz);
+
+      // test ranges, including possible extension
+      int fromIndex, toIndex;
+      fromIndex = random.nextInt(sz+80);
+      toIndex = fromIndex + random.nextInt((sz>>1)+1);
+      BitSet aa = (BitSet)a.clone(); aa.flip(fromIndex,toIndex);
+      OpenBitSet bb = (OpenBitSet)b.clone(); bb.flip(fromIndex,toIndex);
+
+      doIterate(aa,bb, mode);   // a problem here is from flip or doIterate
+
+      fromIndex = random.nextInt(sz+80);
+      toIndex = fromIndex + random.nextInt((sz>>1)+1);
+      aa = (BitSet)a.clone(); aa.clear(fromIndex,toIndex);
+      bb = (OpenBitSet)b.clone(); bb.clear(fromIndex,toIndex);
+
+      doNextSetBit(aa,bb); // a problem here is from clear() or nextSetBit
+      doNextSetBitLong(aa,bb);
+      
+      doPrevSetBit(aa,bb);
+      doPrevSetBitLong(aa,bb);
+
+      fromIndex = random.nextInt(sz+80);
+      toIndex = fromIndex + random.nextInt((sz>>1)+1);
+      aa = (BitSet)a.clone(); aa.set(fromIndex,toIndex);
+      bb = (OpenBitSet)b.clone(); bb.set(fromIndex,toIndex);
+
+      doNextSetBit(aa,bb); // a problem here is from set() or nextSetBit
+      doNextSetBitLong(aa,bb);
+    
+      doPrevSetBit(aa,bb);
+      doPrevSetBitLong(aa,bb);
+
+      if (a0 != null) {
+        assertEquals( a.equals(a0), b.equals(b0));
+
+        assertEquals(a.cardinality(), b.cardinality());
+
+        BitSet a_and = (BitSet)a.clone(); a_and.and(a0);
+        BitSet a_or = (BitSet)a.clone(); a_or.or(a0);
+        BitSet a_xor = (BitSet)a.clone(); a_xor.xor(a0);
+        BitSet a_andn = (BitSet)a.clone(); a_andn.andNot(a0);
+
+        OpenBitSet b_and = (OpenBitSet)b.clone(); assertEquals(b,b_and); b_and.and(b0);
+        OpenBitSet b_or = (OpenBitSet)b.clone(); b_or.or(b0);
+        OpenBitSet b_xor = (OpenBitSet)b.clone(); b_xor.xor(b0);
+        OpenBitSet b_andn = (OpenBitSet)b.clone(); b_andn.andNot(b0);
+
+        doIterate(a_and,b_and, mode);
+        doIterate(a_or,b_or, mode);
+        doIterate(a_xor,b_xor, mode);
+        doIterate(a_andn,b_andn, mode);
+
+        assertEquals(a_and.cardinality(), b_and.cardinality());
+        assertEquals(a_or.cardinality(), b_or.cardinality());
+        assertEquals(a_xor.cardinality(), b_xor.cardinality());
+        assertEquals(a_andn.cardinality(), b_andn.cardinality());
+
+        // test non-mutating popcounts
+        assertEquals(b_and.cardinality(), OpenBitSet.intersectionCount(b,b0));
+        assertEquals(b_or.cardinality(), OpenBitSet.unionCount(b,b0));
+        assertEquals(b_xor.cardinality(), OpenBitSet.xorCount(b,b0));
+        assertEquals(b_andn.cardinality(), OpenBitSet.andNotCount(b,b0));
+      }
+
+      a0=a;
+      b0=b;
+    }
+  }
+  
+  // large enough to flush out obvious bugs, small enough to run in <0.5 sec as part of a
+  // larger test suite.
+  public void testSmall() {
+    doRandomSets(atLeast(1200), atLeast(1000), 1);
+    doRandomSets(atLeast(1200), atLeast(1000), 2);
+  }
+
+  // uncomment to run a bigger test (~2 minutes).
+  /*
+  public void testBig() {
+    doRandomSets(2000,200000, 1);
+    doRandomSets(2000,200000, 2);
+  }
+  */
+
+  public void testEquals() {
+    OpenBitSet b1 = new OpenBitSet(1111);
+    OpenBitSet b2 = new OpenBitSet(2222);
+    assertTrue(b1.equals(b2));
+    assertTrue(b2.equals(b1));
+    b1.set(10);
+    assertFalse(b1.equals(b2));
+    assertFalse(b2.equals(b1));
+    b2.set(10);
+    assertTrue(b1.equals(b2));
+    assertTrue(b2.equals(b1));
+    b2.set(2221);
+    assertFalse(b1.equals(b2));
+    assertFalse(b2.equals(b1));
+    b1.set(2221);
+    assertTrue(b1.equals(b2));
+    assertTrue(b2.equals(b1));
+
+    // try different type of object
+    assertFalse(b1.equals(new Object()));
+  }
+  
+  public void testHashCodeEquals() {
+    OpenBitSet bs1 = new OpenBitSet(200);
+    OpenBitSet bs2 = new OpenBitSet(64);
+    bs1.set(3);
+    bs2.set(3);       
+    assertEquals(bs1, bs2);
+    assertEquals(bs1.hashCode(), bs2.hashCode());
+  } 
+
+  
+  private OpenBitSet makeOpenBitSet(int[] a) {
+    OpenBitSet bs = new OpenBitSet();
+    for (int e: a) {
+      bs.set(e);
+    }
+    return bs;
+  }
+
+  private BitSet makeBitSet(int[] a) {
+    BitSet bs = new BitSet();
+    for (int e: a) {
+      bs.set(e);
+    }
+    return bs;
+  }
+
+  private void checkPrevSetBitArray(int [] a) {
+    OpenBitSet obs = makeOpenBitSet(a);
+    BitSet bs = makeBitSet(a);
+    doPrevSetBit(bs, obs);
+  }
+
+  public void testPrevSetBit() {
+    checkPrevSetBitArray(new int[] {});
+    checkPrevSetBitArray(new int[] {0});
+    checkPrevSetBitArray(new int[] {0,2});
+  }
+}
+
+
+
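TestOpenBitSet above checks OpenBitSet against java.util.BitSet for every mutation and iteration path. A minimal usage sketch, using only the OpenBitSet and OpenBitSetIterator calls already exercised by the test (set/fastSet and DocIdSetIterator-style iteration):

    import org.apache.lucene.search.DocIdSetIterator;
    import org.apache.lucene.util.OpenBitSet;
    import org.apache.lucene.util.OpenBitSetIterator;

    public class OpenBitSetDemo {
      public static void main(String[] args) throws Exception {
        OpenBitSet bits = new OpenBitSet(128);
        bits.set(3);        // set() checks/extends capacity as needed
        bits.fastSet(70);   // fastSet() assumes the index is already in range
        // Walk the set bits with the iterator, as doIterate1/doIterate2 do above.
        OpenBitSetIterator it = new OpenBitSetIterator(bits);
        for (int doc = it.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = it.nextDoc()) {
          System.out.println("set bit: " + doc);
        }
      }
    }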
diff --git a/lucene/backwards/src/test/org/apache/lucene/util/TestPriorityQueue.java b/lucene/backwards/src/test/org/apache/lucene/util/TestPriorityQueue.java
new file mode 100644
index 0000000..93b6378
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/util/TestPriorityQueue.java
@@ -0,0 +1,116 @@
+package org.apache.lucene.util;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Random;
+
+public class TestPriorityQueue extends LuceneTestCase {
+
+    private static class IntegerQueue extends PriorityQueue<Integer> {
+        public IntegerQueue(int count) {
+            super();
+            initialize(count);
+        }
+
+        @Override
+        protected boolean lessThan(Integer a, Integer b) {
+            return (a < b);
+        }
+    }
+
+    public void testPQ() throws Exception {
+        testPQ(atLeast(10000), random);
+    }
+
+    public static void testPQ(int count, Random gen) {
+        PriorityQueue<Integer> pq = new IntegerQueue(count);
+        int sum = 0, sum2 = 0;
+
+        for (int i = 0; i < count; i++)
+        {
+            int next = gen.nextInt();
+            sum += next;
+            pq.add(next);
+        }
+
+        //      Date end = new Date();
+
+        //      System.out.print(((float)(end.getTime()-start.getTime()) / count) * 1000);
+        //      System.out.println(" microseconds/put");
+
+        //      start = new Date();
+
+        int last = Integer.MIN_VALUE;
+        for (int i = 0; i < count; i++)
+        {
+            Integer next = pq.pop();
+            assertTrue(next.intValue() >= last);
+            last = next.intValue();
+            sum2 += last;
+        }
+
+        assertEquals(sum, sum2);
+        //      end = new Date();
+
+        //      System.out.print(((float)(end.getTime()-start.getTime()) / count) * 1000);
+        //      System.out.println(" microseconds/pop");
+    }
+
+    public void testClear() {
+        PriorityQueue<Integer> pq = new IntegerQueue(3);
+        pq.add(2);
+        pq.add(3);
+        pq.add(1);
+        assertEquals(3, pq.size());
+        pq.clear();
+        assertEquals(0, pq.size());
+    }
+    
+    public void testFixedSize() {
+        PriorityQueue<Integer> pq = new IntegerQueue(3);
+        pq.insertWithOverflow(2);
+        pq.insertWithOverflow(3);
+        pq.insertWithOverflow(1);
+        pq.insertWithOverflow(5);
+        pq.insertWithOverflow(7);
+        pq.insertWithOverflow(1);
+        assertEquals(3, pq.size());
+        assertEquals((Integer) 3, pq.top());
+    }
+    
+    public void testInsertWithOverflow() {
+      int size = 4;
+      PriorityQueue<Integer> pq = new IntegerQueue(size);
+      Integer i1 = 2;
+      Integer i2 = 3;
+      Integer i3 = 1;
+      Integer i4 = 5;
+      Integer i5 = 7;
+      Integer i6 = 1;
+      
+      assertNull(pq.insertWithOverflow(i1));
+      assertNull(pq.insertWithOverflow(i2));
+      assertNull(pq.insertWithOverflow(i3));
+      assertNull(pq.insertWithOverflow(i4));
+      assertTrue(pq.insertWithOverflow(i5) == i3); // i3 should have been dropped
+      assertTrue(pq.insertWithOverflow(i6) == i6); // i6 should not have been inserted
+      assertEquals(size, pq.size());
+      assertEquals((Integer) 2, pq.top());
+    }
+  
+}
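TestPriorityQueue above pins down the insertWithOverflow() contract: null is returned while there is room, the evicted smallest element once the queue is full, or the rejected element itself if it does not beat the current top. A minimal top-N sketch built on the same subclassing pattern:

    import org.apache.lucene.util.PriorityQueue;

    public class TopNDemo {
      // lessThan() defines the heap order; the queue keeps the N largest elements.
      static final class TopN extends PriorityQueue<Integer> {
        TopN(int n) { initialize(n); }
        @Override
        protected boolean lessThan(Integer a, Integer b) { return a < b; }
      }

      public static void main(String[] args) {
        TopN top3 = new TopN(3);
        for (int v : new int[] {2, 3, 1, 5, 7, 1}) {
          top3.insertWithOverflow(v);  // returns the dropped element once the queue is full
        }
        System.out.println(top3.top());  // 3, the smallest of the kept {3, 5, 7}, as in testFixedSize
      }
    }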
diff --git a/lucene/backwards/src/test/org/apache/lucene/util/TestRamUsageEstimator.java b/lucene/backwards/src/test/org/apache/lucene/util/TestRamUsageEstimator.java
new file mode 100644
index 0000000..a64d094
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/util/TestRamUsageEstimator.java
@@ -0,0 +1,51 @@
+package org.apache.lucene.util;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.util.LuceneTestCase;
+
+public class TestRamUsageEstimator extends LuceneTestCase {
+
+  public void testBasic() {
+    RamUsageEstimator rue = new RamUsageEstimator();
+    rue.estimateRamUsage("test str");
+    
+    rue.estimateRamUsage("test strin");
+    
+    Holder holder = new Holder();
+    holder.holder = new Holder("string2", 5000L);
+    rue.estimateRamUsage(holder);
+    
+    String[] strings = new String[]{new String("test strin"), new String("hollow"), new String("catchmaster")};
+    rue.estimateRamUsage(strings);
+  }
+  
+  private static final class Holder {
+    long field1 = 5000L;
+    String name = "name";
+    Holder holder;
+    
+    Holder() {
+    }
+    
+    Holder(String name, long field1) {
+      this.name = name;
+      this.field1 = field1;
+    }
+  }
+}
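TestRamUsageEstimator above only smoke-tests the estimator; the call itself walks the object graph reflectively and approximates its heap footprint. A minimal sketch, assuming the estimateRamUsage(Object) method used above returns the estimate in bytes:

    import org.apache.lucene.util.RamUsageEstimator;

    public class RamUsageDemo {
      public static void main(String[] args) {
        // Approximate heap footprint of an object graph, in bytes.
        long bytes = new RamUsageEstimator().estimateRamUsage(new long[1024]);
        System.out.println("~" + bytes + " bytes");
      }
    }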
diff --git a/lucene/backwards/src/test/org/apache/lucene/util/TestRecyclingByteBlockAllocator.java b/lucene/backwards/src/test/org/apache/lucene/util/TestRecyclingByteBlockAllocator.java
new file mode 100644
index 0000000..2425cf1
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/util/TestRecyclingByteBlockAllocator.java
@@ -0,0 +1,143 @@
+package org.apache.lucene.util;
+
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicLong;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Testcase for {@link RecyclingByteBlockAllocator}
+ */
+public class TestRecyclingByteBlockAllocator extends LuceneTestCase {
+
+  /**
+   */
+  @Override
+  @Before
+  public void setUp() throws Exception {
+    super.setUp();
+  }
+
+  private RecyclingByteBlockAllocator newAllocator() {
+    return new RecyclingByteBlockAllocator(1 << (2 + random.nextInt(15)),
+        random.nextInt(97), new AtomicLong());
+  }
+
+  @Test
+  public void testAllocate() {
+    RecyclingByteBlockAllocator allocator = newAllocator();
+    HashSet<byte[]> set = new HashSet<byte[]>();
+    byte[] block = allocator.getByteBlock();
+    set.add(block);
+    assertNotNull(block);
+    final int size = block.length;
+
+    int num = atLeast(97);
+    for (int i = 0; i < num; i++) {
+      block = allocator.getByteBlock();
+      assertNotNull(block);
+      assertEquals(size, block.length);
+      assertTrue("block is returned twice", set.add(block));
+      assertEquals(size * (i + 2), allocator.bytesUsed()); // zero based + 1
+      assertEquals(0, allocator.numBufferedBlocks());
+    }
+  }
+
+  @Test
+  public void testAllocateAndRecycle() {
+    RecyclingByteBlockAllocator allocator = newAllocator();
+    HashSet<byte[]> allocated = new HashSet<byte[]>();
+
+    byte[] block = allocator.getByteBlock();
+    allocated.add(block);
+    assertNotNull(block);
+    final int size = block.length;
+
+    int numIters = atLeast(97);
+    for (int i = 0; i < numIters; i++) {
+      int num = 1 + random.nextInt(39);
+      for (int j = 0; j < num; j++) {
+        block = allocator.getByteBlock();
+        assertNotNull(block);
+        assertEquals(size, block.length);
+        assertTrue("block is returned twice", allocated.add(block));
+        assertEquals(size * (allocated.size() +  allocator.numBufferedBlocks()), allocator
+            .bytesUsed());
+      }
+      byte[][] array = allocated.toArray(new byte[0][]);
+      int begin = random.nextInt(array.length);
+      int end = begin + random.nextInt(array.length - begin);
+      List<byte[]> selected = new ArrayList<byte[]>();
+      for (int j = begin; j < end; j++) {
+        selected.add(array[j]);
+      }
+      allocator.recycleByteBlocks(array, begin, end);
+      for (int j = begin; j < end; j++) {
+        assertNull(array[j]);
+        byte[] b = selected.remove(0);
+        assertTrue(allocated.remove(b));
+      }
+    }
+  }
+
+  @Test
+  public void testAllocateAndFree() {
+    RecyclingByteBlockAllocator allocator = newAllocator();
+    HashSet<byte[]> allocated = new HashSet<byte[]>();
+    int freeButAllocated = 0;
+    byte[] block = allocator.getByteBlock();
+    allocated.add(block);
+    assertNotNull(block);
+    final int size = block.length;
+
+    int numIters = atLeast(97);
+    for (int i = 0; i < numIters; i++) {
+      int num = 1 + random.nextInt(39);
+      for (int j = 0; j < num; j++) {
+        block = allocator.getByteBlock();
+        freeButAllocated = Math.max(0, freeButAllocated - 1);
+        assertNotNull(block);
+        assertEquals(size, block.length);
+        assertTrue("block is returned twice", allocated.add(block));
+        assertEquals(size * (allocated.size() + allocator.numBufferedBlocks()),
+            allocator.bytesUsed());
+      }
+
+      byte[][] array = allocated.toArray(new byte[0][]);
+      int begin = random.nextInt(array.length);
+      int end = begin + random.nextInt(array.length - begin);
+      for (int j = begin; j < end; j++) {
+        byte[] b = array[j];
+        assertTrue(allocated.remove(b));
+      }
+      allocator.recycleByteBlocks(array, begin, end);
+      for (int j = begin; j < end; j++) {
+        assertNull(array[j]);
+      }
+      // randomly free blocks
+      int numFreeBlocks = allocator.numBufferedBlocks();
+      int freeBlocks = allocator.freeBlocks(random.nextInt(7 + allocator
+          .maxBufferedBlocks()));
+      assertEquals(allocator.numBufferedBlocks(), numFreeBlocks - freeBlocks);
+    }
+  }
+}
\ No newline at end of file
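TestRecyclingByteBlockAllocator above covers the allocate/recycle/free cycle and the bytesUsed bookkeeping. A minimal sketch of that lifecycle, using only the constructor and methods exercised by the test:

    import java.util.concurrent.atomic.AtomicLong;
    import org.apache.lucene.util.RecyclingByteBlockAllocator;

    public class AllocatorDemo {
      public static void main(String[] args) {
        // 1 KB blocks, at most 8 recycled blocks buffered; bytesUsed is tracked in the AtomicLong.
        RecyclingByteBlockAllocator allocator =
            new RecyclingByteBlockAllocator(1024, 8, new AtomicLong());
        byte[][] blocks = new byte[4][];
        for (int i = 0; i < blocks.length; i++) {
          blocks[i] = allocator.getByteBlock();
        }
        System.out.println("bytesUsed after allocate: " + allocator.bytesUsed());
        allocator.recycleByteBlocks(blocks, 0, blocks.length);  // entries are nulled, blocks buffered
        System.out.println("buffered blocks: " + allocator.numBufferedBlocks());
      }
    }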
diff --git a/lucene/backwards/src/test/org/apache/lucene/util/TestSetOnce.java b/lucene/backwards/src/test/org/apache/lucene/util/TestSetOnce.java
new file mode 100644
index 0000000..fad8e19
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/util/TestSetOnce.java
@@ -0,0 +1,99 @@
+package org.apache.lucene.util;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Random;
+
+import org.apache.lucene.util.SetOnce.AlreadySetException;
+import org.junit.Test;
+
+public class TestSetOnce extends LuceneTestCase {
+
+  private static final class SetOnceThread extends Thread {
+    SetOnce<Integer> set;
+    boolean success = false;
+    final Random RAND;
+    
+    public SetOnceThread(Random random) {
+      RAND = new Random(random.nextLong());
+    }
+    
+    @Override
+    public void run() {
+      try {
+        sleep(RAND.nextInt(10)); // sleep for a short time
+        set.set(new Integer(Integer.parseInt(getName().substring(2))));
+        success = true;
+      } catch (InterruptedException e) {
+        // ignore
+      } catch (RuntimeException e) {
+        // TODO: change exception type
+        // expected.
+        success = false;
+      }
+    }
+  }
+  
+  @Test
+  public void testEmptyCtor() throws Exception {
+    SetOnce<Integer> set = new SetOnce<Integer>();
+    assertNull(set.get());
+  }
+  
+  @Test(expected=AlreadySetException.class)
+  public void testSettingCtor() throws Exception {
+    SetOnce<Integer> set = new SetOnce<Integer>(new Integer(5));
+    assertEquals(5, set.get().intValue());
+    set.set(new Integer(7));
+  }
+  
+  @Test(expected=AlreadySetException.class)
+  public void testSetOnce() throws Exception {
+    SetOnce<Integer> set = new SetOnce<Integer>();
+    set.set(new Integer(5));
+    assertEquals(5, set.get().intValue());
+    set.set(new Integer(7));
+  }
+  
+  @Test
+  public void testSetMultiThreaded() throws Exception {
+    final SetOnce<Integer> set = new SetOnce<Integer>();
+    SetOnceThread[] threads = new SetOnceThread[10];
+    for (int i = 0; i < threads.length; i++) {
+      threads[i] = new SetOnceThread(random);
+      threads[i].setName("t-" + (i+1));
+      threads[i].set = set;
+    }
+    
+    for (Thread t : threads) {
+      t.start();
+    }
+
+    for (Thread t : threads) {
+      t.join();
+    }
+    
+    for (SetOnceThread t : threads) {
+      if (t.success) {
+        int expectedVal = Integer.parseInt(t.getName().substring(2));
+        assertEquals("thread " + t.getName(), expectedVal, t.set.get().intValue());
+      }
+    }
+  }
+  
+}
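TestSetOnce above verifies the write-once semantics, including under concurrent setters. A minimal single-threaded sketch of the same contract:

    import org.apache.lucene.util.SetOnce;

    public class SetOnceDemo {
      public static void main(String[] args) {
        SetOnce<String> value = new SetOnce<String>();
        value.set("first");
        System.out.println(value.get());   // prints "first"
        try {
          value.set("second");             // any second set() must fail
        } catch (SetOnce.AlreadySetException e) {
          System.out.println("already set, as expected");
        }
      }
    }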
diff --git a/lucene/backwards/src/test/org/apache/lucene/util/TestSmallFloat.java b/lucene/backwards/src/test/org/apache/lucene/util/TestSmallFloat.java
new file mode 100644
index 0000000..2ee03c6
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/util/TestSmallFloat.java
@@ -0,0 +1,150 @@
+package org.apache.lucene.util;
+
+/**
+ * Copyright 2005 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class TestSmallFloat extends LuceneTestCase {
+
+  // original lucene byteToFloat
+  static float orig_byteToFloat(byte b) {
+    if (b == 0)                                   // zero is a special case
+      return 0.0f;
+    int mantissa = b & 7;
+    int exponent = (b >> 3) & 31;
+    int bits = ((exponent+(63-15)) << 24) | (mantissa << 21);
+    return Float.intBitsToFloat(bits);
+  }
+
+  // original lucene floatToByte (since lucene 1.3)
+  static byte orig_floatToByte_v13(float f) {
+    if (f < 0.0f)                                 // round negatives up to zero
+      f = 0.0f;
+
+    if (f == 0.0f)                                // zero is a special case
+      return 0;
+
+    int bits = Float.floatToIntBits(f);           // parse float into parts
+    int mantissa = (bits & 0xffffff) >> 21;
+    int exponent = (((bits >> 24) & 0x7f) - 63) + 15;
+
+    if (exponent > 31) {                          // overflow: use max value
+      exponent = 31;
+      mantissa = 7;
+    }
+
+    if (exponent < 0) {                           // underflow: use min value
+      exponent = 0;
+      mantissa = 1;
+    }
+
+    return (byte)((exponent << 3) | mantissa);    // pack into a byte
+  }
+
+  // This is the original lucene floatToBytes (from v1.3)
+  // except with the underflow detection bug fixed for values like 5.8123817E-10f
+  static byte orig_floatToByte(float f) {
+    if (f < 0.0f)                                 // round negatives up to zero
+      f = 0.0f;
+
+    if (f == 0.0f)                                // zero is a special case
+      return 0;
+
+    int bits = Float.floatToIntBits(f);           // parse float into parts
+    int mantissa = (bits & 0xffffff) >> 21;
+    int exponent = (((bits >> 24) & 0x7f) - 63) + 15;
+
+    if (exponent > 31) {                          // overflow: use max value
+      exponent = 31;
+      mantissa = 7;
+    }
+
+    if (exponent < 0 || exponent == 0 && mantissa == 0) { // underflow: use min value
+      exponent = 0;
+      mantissa = 1;
+    }
+
+    return (byte)((exponent << 3) | mantissa);    // pack into a byte
+  }
+
+
+  public void testByteToFloat() {
+    for (int i=0; i<256; i++) {
+      float f1 = orig_byteToFloat((byte)i);
+      float f2 = SmallFloat.byteToFloat((byte)i, 3,15);
+      float f3 = SmallFloat.byte315ToFloat((byte)i);
+      assertEquals(f1,f2,0.0);
+      assertEquals(f2,f3,0.0);
+
+      float f4 = SmallFloat.byteToFloat((byte)i,5,2);
+      float f5 = SmallFloat.byte52ToFloat((byte)i);
+      assertEquals(f4,f5,0.0);
+    }
+  }
+
+  public void testFloatToByte() {
+    assertEquals(0, orig_floatToByte_v13(5.8123817E-10f));       // verify the old bug (see LUCENE-2937)
+    assertEquals(1, orig_floatToByte(5.8123817E-10f));           // verify it's fixed in this test code
+    assertEquals(1, SmallFloat.floatToByte315(5.8123817E-10f));  // verify it's fixed
+
+    // test some constants
+    assertEquals(0, SmallFloat.floatToByte315(0));
+    assertEquals(1, SmallFloat.floatToByte315(Float.MIN_VALUE));             // underflow rounds up to smallest positive
+    assertEquals(255, SmallFloat.floatToByte315(Float.MAX_VALUE) & 0xff);    // overflow rounds down to largest positive
+    assertEquals(255, SmallFloat.floatToByte315(Float.POSITIVE_INFINITY) & 0xff);
+
+    // all negatives map to 0
+    assertEquals(0, SmallFloat.floatToByte315(-Float.MIN_VALUE));
+    assertEquals(0, SmallFloat.floatToByte315(-Float.MAX_VALUE));
+    assertEquals(0, SmallFloat.floatToByte315(Float.NEGATIVE_INFINITY));
+
+
+    // up iterations for more exhaustive test after changing something
+    int num = atLeast(100000);
+    for (int i = 0; i < num; i++) {
+      float f = Float.intBitsToFloat(random.nextInt());
+      if (Float.isNaN(f)) continue;    // skip NaN
+      byte b1 = orig_floatToByte(f);
+      byte b2 = SmallFloat.floatToByte(f,3,15);
+      byte b3 = SmallFloat.floatToByte315(f);
+      assertEquals(b1,b2);
+      assertEquals(b2,b3);
+
+      byte b4 = SmallFloat.floatToByte(f,5,2);
+      byte b5 = SmallFloat.floatToByte52(f);
+      assertEquals(b4,b5);
+    }
+  }
+
+  /***
+  // Do an exhaustive test of all possible floating point values
+  // for the 315 float against the original norm encoding in Similarity.
+  // Takes 75 seconds on my Pentium4 3GHz, with Java5 -server
+  public void testAllFloats() {
+    for(int i = Integer.MIN_VALUE;;i++) {
+      float f = Float.intBitsToFloat(i);
+      if (f==f) { // skip non-numbers
+        byte b1 = orig_floatToByte(f);
+        byte b2 = SmallFloat.floatToByte315(f);
+        if (b1!=b2 || b2==0 && f>0) {
+          fail("Failed floatToByte315 for float " + f + " source bits="+Integer.toHexString(i) + " float raw bits=" + Integer.toHexString(Float.floatToRawIntBits(i)));
+        }
+      }
+      if (i==Integer.MAX_VALUE) break;
+    }
+  }
+  ***/
+
+}
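TestSmallFloat above compares the current encoder against the historical 1.3-era implementation. The 3-1-5 encoding packs a float into a single byte (3 mantissa bits, exponent zero point 15), so round trips are deliberately lossy. A minimal round-trip sketch using the two methods exercised above:

    import org.apache.lucene.util.SmallFloat;

    public class SmallFloatDemo {
      public static void main(String[] args) {
        float f = 0.7f;
        byte b = SmallFloat.floatToByte315(f);
        float back = SmallFloat.byte315ToFloat(b);
        // "back" is the nearest representable value, not exactly 0.7.
        System.out.println(f + " -> byte " + b + " -> " + back);
      }
    }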
diff --git a/lucene/backwards/src/test/org/apache/lucene/util/TestSortedVIntList.java b/lucene/backwards/src/test/org/apache/lucene/util/TestSortedVIntList.java
new file mode 100644
index 0000000..cef1e7c
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/util/TestSortedVIntList.java
@@ -0,0 +1,201 @@
+package org.apache.lucene.util;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.BitSet;
+
+import junit.framework.TestSuite;
+import junit.textui.TestRunner;
+
+import org.apache.lucene.search.DocIdSetIterator;
+
+public class TestSortedVIntList extends LuceneTestCase {
+  /** Main for running test case by itself. */
+  public static void main(String args[]) {
+    TestRunner.run(new TestSuite(TestSortedVIntList.class));
+  }
+  
+  void tstIterator (
+          SortedVIntList vintList,
+          int[] ints) throws IOException {
+    for (int i = 0; i < ints.length; i++) {
+      if ((i > 0) && (ints[i-1] == ints[i])) {
+        return; // DocNrSkipper should not skip to same document.
+      }
+    }
+    DocIdSetIterator m = vintList.iterator();
+    for (int i = 0; i < ints.length; i++) {
+      assertTrue("No end of Matcher at: " + i, m.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
+      assertEquals(ints[i], m.docID());
+    }
+    assertTrue("End of Matcher", m.nextDoc() == DocIdSetIterator.NO_MORE_DOCS);
+  }
+
+  void tstVIntList(
+          SortedVIntList vintList,
+          int[] ints,
+          int expectedByteSize) throws IOException {
+    assertEquals("Size", ints.length, vintList.size());
+    assertEquals("Byte size", expectedByteSize, vintList.getByteSize());
+    tstIterator(vintList, ints);
+  }
+
+  public void tstViaBitSet(int [] ints, int expectedByteSize) throws IOException {
+    final int MAX_INT_FOR_BITSET = 1024 * 1024;
+    BitSet bs = new BitSet();
+    for (int i = 0; i < ints.length; i++) {
+      if (ints[i] > MAX_INT_FOR_BITSET) {
+        return; // BitSet takes too much memory
+      }
+      if ((i > 0) && (ints[i-1] == ints[i])) {
+        return; // BitSet cannot store duplicates.
+      }
+      bs.set(ints[i]);
+    }
+    SortedVIntList svil = new SortedVIntList(bs);
+    tstVIntList(svil, ints, expectedByteSize);
+    tstVIntList(new SortedVIntList(svil.iterator()), ints, expectedByteSize);
+  }
+  
+  private static final int VB1 = 0x7F;
+  private static final int BIT_SHIFT = 7;
+  private static final int VB2 = (VB1 << BIT_SHIFT) | VB1;
+  private static final int VB3 = (VB2 << BIT_SHIFT) | VB1;
+  private static final int VB4 = (VB3 << BIT_SHIFT) | VB1;
+
+  private int vIntByteSize(int i) {
+    assert i >= 0;
+    if (i <= VB1) return 1;
+    if (i <= VB2) return 2;
+    if (i <= VB3) return 3;
+    if (i <= VB4) return 4;
+    return 5;
+  }
+
+  private int vIntListByteSize(int [] ints) {
+    int byteSize = 0;
+    int last = 0;
+    for (int i = 0; i < ints.length; i++) {
+      byteSize += vIntByteSize(ints[i] - last);
+      last = ints[i];
+    }
+    return byteSize;
+  }
+  
+  public void tstInts(int [] ints) {
+    int expectedByteSize = vIntListByteSize(ints);
+    try {
+      tstVIntList(new SortedVIntList(ints), ints, expectedByteSize);
+      tstViaBitSet(ints, expectedByteSize);
+    } catch (IOException ioe) {
+      throw new Error(ioe);
+    }
+  }
+
+  public void tstIllegalArgExc(int [] ints) {
+    try {
+      new SortedVIntList(ints);
+    }
+    catch (IllegalArgumentException e) {
+      return;
+    }
+    fail("Expected IllegalArgumentException");    
+  }
+
+  private int[] fibArray(int a, int b, int size) {
+    final int[] fib = new int[size];
+    fib[0] = a;
+    fib[1] = b;
+    for (int i = 2; i < size; i++) {
+      fib[i] = fib[i-1] + fib[i-2];
+    }
+    return fib;
+  }
+
+  private int[] reverseDiffs(int []ints) { // reverse the order of the successive differences
+    final int[] res = new int[ints.length];
+    for (int i = 0; i < ints.length; i++) {
+      res[i] = ints[ints.length - 1] + (ints[0] - ints[ints.length - 1 - i]);
+    }
+    return res;
+  }
+
+  public void test01() {
+    tstInts(new int[] {});
+  }
+  public void test02() {
+    tstInts(new int[] {0});
+  }
+  public void test04a() {
+    tstInts(new int[] {0, VB2 - 1});
+  }
+  public void test04b() {
+    tstInts(new int[] {0, VB2});
+  }
+  public void test04c() {
+    tstInts(new int[] {0, VB2 + 1});
+  }
+  public void test05() {
+    tstInts(fibArray(0,1,7)); // includes duplicate value 1
+  }
+  public void test05b() {
+    tstInts(reverseDiffs(fibArray(0,1,7)));
+  }
+  public void test06() {
+    tstInts(fibArray(1,2,45)); // no duplicates; at size 46 the values would exceed max int.
+  }
+  public void test06b() {
+    tstInts(reverseDiffs(fibArray(1,2,45)));
+  }
+  public void test07a() {
+    tstInts(new int[] {0, VB3});
+  }
+  public void test07b() {
+    tstInts(new int[] {1, VB3 + 2});
+  }
+  public void test07c() {
+    tstInts(new int[] {2, VB3 + 4});
+  }
+  public void test08a() {
+    tstInts(new int[] {0, VB4 + 1});
+  }
+  public void test08b() {
+    tstInts(new int[] {1, VB4 + 1});
+  }
+  public void test08c() {
+    tstInts(new int[] {2, VB4 + 1});
+  }
+
+  public void test10() {
+    tstIllegalArgExc(new int[] {-1});
+  }
+  public void test11() {
+    tstIllegalArgExc(new int[] {1,0});
+  }
+  public void test12() {
+   tstIllegalArgExc(new int[] {0,1,1,2,3,5,8,0});
+  }
+  public void test13Allocation() throws Exception {
+    int [] a = new int[2000]; // SortedVIntList initial byte size is 128
+    for (int i = 0; i < a.length; i++) {
+      a[i] = (107 + i) * i;
+    }
+    tstIterator(new SortedVIntList(a), a);
+  }
+}
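TestSortedVIntList above checks both the delta/VInt byte-size accounting and the DocIdSetIterator view. A minimal sketch of building a list from sorted doc ids and iterating it, using the same constructor and accessors as the tests:

    import org.apache.lucene.search.DocIdSetIterator;
    import org.apache.lucene.util.SortedVIntList;

    public class SortedVIntListDemo {
      public static void main(String[] args) throws Exception {
        // Ascending doc ids are stored as variable-length-encoded deltas.
        SortedVIntList list = new SortedVIntList(new int[] {3, 5, 7, 128, 500});
        System.out.println("docs=" + list.size() + " bytes=" + list.getByteSize());
        DocIdSetIterator it = list.iterator();
        for (int doc = it.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = it.nextDoc()) {
          System.out.println(doc);
        }
      }
    }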
diff --git a/lucene/backwards/src/test/org/apache/lucene/util/TestStringIntern.java b/lucene/backwards/src/test/org/apache/lucene/util/TestStringIntern.java
new file mode 100755
index 0000000..db2b6cd
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/util/TestStringIntern.java
@@ -0,0 +1,89 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.lucene.util;
+import java.util.Random;
+
+public class TestStringIntern extends LuceneTestCase {
+  String[] testStrings;
+  String[] internedStrings;
+
+  private String randStr(int len) {
+    char[] arr = new char[len];
+    for (int i=0; i<len; i++) {
+      arr[i] = (char)('a' + random.nextInt(26));
+    }
+    return new String(arr);
+  }
+
+  private void makeStrings(int sz) {
+    testStrings = new String[sz];
+    internedStrings = new String[sz];
+    for (int i=0; i<sz; i++) {
+      testStrings[i] = randStr(random.nextInt(8)+3);
+    }
+  }
+
+  public void testStringIntern() throws InterruptedException {
+    makeStrings(1024*10);  // something greater than the default cache capacity
+    // makeStrings(100);  // realistic for perf testing
+    int nThreads = 20;
+    // final int iter=100000;
+    final int iter = atLeast(100000);
+    
+    // try native intern
+    // StringHelper.interner = new StringInterner();
+
+    Thread[] threads = new Thread[nThreads];
+    for (int i=0; i<nThreads; i++) {
+      final int seed = i;
+      threads[i] = new Thread() {
+        @Override
+        public void run() {
+          Random rand = new Random(seed);
+          String[] myInterned = new String[testStrings.length];
+          for (int j=0; j<iter; j++) {
+            int idx = rand.nextInt(testStrings.length);
+            String s = testStrings[idx];
+            if (rand.nextBoolean()) s = new String(s); // make a copy half of the time
+            String interned = StringHelper.intern(s);
+            String prevInterned = myInterned[idx];
+            String otherInterned = internedStrings[idx];
+
+            // test against other threads
+            if (otherInterned != null && otherInterned != interned) {
+              fail();
+            }
+            internedStrings[idx] = interned;
+
+            // test against local copy
+            if (prevInterned != null && prevInterned != interned) {
+              fail();
+            }
+            myInterned[idx] = interned;
+          }
+        }
+      };
+
+      threads[i].start();
+    }
+
+    for (int i=0; i<nThreads; i++) {
+      threads[i].join();
+    }
+  }
+}
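TestStringIntern above hammers StringHelper.intern() from many threads and checks that every caller gets the same canonical instance back. A minimal sketch of the identity guarantee the test relies on:

    import org.apache.lucene.util.StringHelper;

    public class InternDemo {
      public static void main(String[] args) {
        String a = StringHelper.intern(new String("title"));
        String b = StringHelper.intern(new String("title"));
        System.out.println(a == b);  // true: both calls return the same canonical instance
      }
    }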
diff --git a/lucene/backwards/src/test/org/apache/lucene/util/TestTwoPhaseCommitTool.java b/lucene/backwards/src/test/org/apache/lucene/util/TestTwoPhaseCommitTool.java
new file mode 100644
index 0000000..ddbb540
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/util/TestTwoPhaseCommitTool.java
@@ -0,0 +1,164 @@
+package org.apache.lucene.util;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.lucene.util.TwoPhaseCommitTool.TwoPhaseCommitWrapper;
+
+public class TestTwoPhaseCommitTool extends LuceneTestCase {
+
+  private static class TwoPhaseCommitImpl implements TwoPhaseCommit {
+    static boolean commitCalled = false;
+    final boolean failOnPrepare;
+    final boolean failOnCommit;
+    final boolean failOnRollback;
+    boolean rollbackCalled = false;
+    Map<String, String> prepareCommitData = null;
+    Map<String, String> commitData = null;
+
+    public TwoPhaseCommitImpl(boolean failOnPrepare, boolean failOnCommit, boolean failOnRollback) {
+      this.failOnPrepare = failOnPrepare;
+      this.failOnCommit = failOnCommit;
+      this.failOnRollback = failOnRollback;
+    }
+
+    public void prepareCommit() throws IOException {
+      prepareCommit(null);
+    }
+
+    public void prepareCommit(Map<String, String> commitData) throws IOException {
+      this.prepareCommitData = commitData;
+      assertFalse("commit should not have been called before all prepareCommit were", commitCalled);
+      if (failOnPrepare) {
+        throw new IOException("failOnPrepare");
+      }
+    }
+
+    public void commit() throws IOException {
+      commit(null);
+    }
+
+    public void commit(Map<String, String> commitData) throws IOException {
+      this.commitData = commitData;
+      commitCalled = true;
+      if (failOnCommit) {
+        throw new RuntimeException("failOnCommit");
+      }
+    }
+
+    public void rollback() throws IOException {
+      rollbackCalled = true;
+      if (failOnRollback) {
+        throw new Error("failOnRollback");
+      }
+    }
+  }
+
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    TwoPhaseCommitImpl.commitCalled = false; // reset count before every test
+  }
+
+  public void testPrepareThenCommit() throws Exception {
+    // tests that prepareCommit() is called on all objects before commit()
+    TwoPhaseCommitImpl[] objects = new TwoPhaseCommitImpl[2];
+    for (int i = 0; i < objects.length; i++) {
+      objects[i] = new TwoPhaseCommitImpl(false, false, false);
+    }
+
+    // the following call will fail if commit() is called before prepareCommit() on all objects
+    TwoPhaseCommitTool.execute(objects);
+  }
+
+  public void testRollback() throws Exception {
+    // tests that rollback is called if failure occurs at any stage
+    int numObjects = random.nextInt(8) + 3; // between [3, 10]
+    TwoPhaseCommitImpl[] objects = new TwoPhaseCommitImpl[numObjects];
+    for (int i = 0; i < objects.length; i++) {
+      boolean failOnPrepare = random.nextBoolean();
+      // we should not hit failures on commit usually
+      boolean failOnCommit = random.nextDouble() < 0.05;
+      boolean failOnRollback = random.nextBoolean();
+      objects[i] = new TwoPhaseCommitImpl(failOnPrepare, failOnCommit, failOnRollback);
+    }
+
+    boolean anyFailure = false;
+    try {
+      TwoPhaseCommitTool.execute(objects);
+    } catch (Throwable t) {
+      anyFailure = true;
+    }
+
+    if (anyFailure) {
+      // if any failure happened, ensure that rollback was called on all.
+      for (TwoPhaseCommitImpl tpc : objects) {
+        assertTrue("rollback was not called while a failure occurred during the 2-phase commit", tpc.rollbackCalled);
+      }
+    }
+  }
+
+  public void testWrapper() throws Exception {
+    // tests that TwoPhaseCommitWrapper delegates prepare/commit w/ commitData
+    TwoPhaseCommitImpl impl = new TwoPhaseCommitImpl(false, false, false);
+    HashMap<String, String> commitData = new HashMap<String, String>();
+    TwoPhaseCommitWrapper wrapper = new TwoPhaseCommitWrapper(impl, commitData);
+
+    wrapper.prepareCommit();
+    assertSame(commitData, impl.prepareCommitData);
+
+    // wrapper should ignore passed commitData
+    wrapper.prepareCommit(new HashMap<String, String>());
+    assertSame(commitData, impl.prepareCommitData);
+
+    wrapper.commit();
+    assertSame(commitData, impl.commitData);
+
+    // wrapper should ignore passed commitData
+    wrapper.commit(new HashMap<String, String>());
+    assertSame(commitData, impl.commitData);
+  }
+
+  public void testNullTPCs() throws Exception {
+    int numObjects = random.nextInt(4) + 3; // between [3, 6]
+    TwoPhaseCommit[] tpcs = new TwoPhaseCommit[numObjects];
+    boolean setNull = false;
+    for (int i = 0; i < tpcs.length; i++) {
+      boolean isNull = random.nextDouble() < 0.3;
+      if (isNull) {
+        setNull = true;
+        tpcs[i] = null;
+      } else {
+        tpcs[i] = new TwoPhaseCommitImpl(false, false, false);
+      }
+    }
+
+    if (!setNull) {
+      // none of the TPCs were picked to be null, pick one at random
+      int idx = random.nextInt(numObjects);
+      tpcs[idx] = null;
+    }
+
+    // the following call would fail if TwoPhaseCommitTool did not handle null TPCs properly
+    TwoPhaseCommitTool.execute(tpcs);
+  }
+
+}
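TestTwoPhaseCommitTool above checks the ordering guarantee (prepareCommit() on every object before any commit()) and the rollback-on-failure path. A minimal sketch of a TwoPhaseCommit participant run through the tool; the class and resource names here are illustrative only:

    import java.io.IOException;
    import java.util.Map;
    import org.apache.lucene.util.TwoPhaseCommit;
    import org.apache.lucene.util.TwoPhaseCommitTool;

    public class TwoPhaseDemo {
      // Trivial participant that just logs each phase.
      static final class LoggingResource implements TwoPhaseCommit {
        private final String name;
        LoggingResource(String name) { this.name = name; }
        public void prepareCommit() throws IOException { System.out.println(name + ": prepare"); }
        public void prepareCommit(Map<String, String> commitData) throws IOException { prepareCommit(); }
        public void commit() throws IOException { System.out.println(name + ": commit"); }
        public void commit(Map<String, String> commitData) throws IOException { commit(); }
        public void rollback() throws IOException { System.out.println(name + ": rollback"); }
      }

      public static void main(String[] args) throws Exception {
        // prepareCommit() runs on both resources before either commit(); on failure, rollback() runs on all.
        TwoPhaseCommitTool.execute(new TwoPhaseCommit[] {
            new LoggingResource("first"), new LoggingResource("second") });
      }
    }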
diff --git a/lucene/backwards/src/test/org/apache/lucene/util/TestVersion.java b/lucene/backwards/src/test/org/apache/lucene/util/TestVersion.java
new file mode 100644
index 0000000..b2e8540
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/util/TestVersion.java
@@ -0,0 +1,31 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.lucene.util;
+
+public class TestVersion extends LuceneTestCase {
+
+  public void test() {
+    for (Version v : Version.values()) {
+      assertTrue("LUCENE_CURRENT must be always onOrAfter("+v+")", Version.LUCENE_CURRENT.onOrAfter(v));
+    }
+    assertTrue(Version.LUCENE_30.onOrAfter(Version.LUCENE_29));
+    assertTrue(Version.LUCENE_30.onOrAfter(Version.LUCENE_30));
+    assertFalse(Version.LUCENE_29.onOrAfter(Version.LUCENE_30));
+  }
+
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/util/TestVersionComparator.java b/lucene/backwards/src/test/org/apache/lucene/util/TestVersionComparator.java
new file mode 100644
index 0000000..d9646d2
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/util/TestVersionComparator.java
@@ -0,0 +1,52 @@
+package org.apache.lucene.util;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Comparator;
+
+/**
+ * Tests for StringHelper.getVersionComparator
+ */
+public class TestVersionComparator extends LuceneTestCase {
+  public void testVersions() {
+    Comparator<String> comp = StringHelper.getVersionComparator();
+    assertTrue(comp.compare("1", "2") < 0);
+    assertTrue(comp.compare("1", "1") == 0);
+    assertTrue(comp.compare("2", "1") > 0);
+    
+    assertTrue(comp.compare("1.1", "1") > 0);
+    assertTrue(comp.compare("1", "1.1") < 0);
+    assertTrue(comp.compare("1.1", "1.1") == 0);
+    
+    assertTrue(comp.compare("1.0", "1") == 0);
+    assertTrue(comp.compare("1", "1.0") == 0);
+    assertTrue(comp.compare("1.0.1", "1.0") > 0);
+    assertTrue(comp.compare("1.0", "1.0.1") < 0);
+    
+    assertTrue(comp.compare("1.02.003", "1.2.3.0") == 0);
+    assertTrue(comp.compare("1.2.3.0", "1.02.003") == 0);
+    
+    assertTrue(comp.compare("1.10", "1.9") > 0);
+    assertTrue(comp.compare("1.9", "1.10") < 0);
+    
+    assertTrue(comp.compare("0", "1.0") < 0);
+    assertTrue(comp.compare("00", "1.0") < 0);
+    assertTrue(comp.compare("-1.0", "1.0") < 0);
+    assertTrue(comp.compare("3.0", Integer.toString(Integer.MIN_VALUE)) > 0);
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/util/TestVirtualMethod.java b/lucene/backwards/src/test/org/apache/lucene/util/TestVirtualMethod.java
new file mode 100644
index 0000000..2f41ad2
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/util/TestVirtualMethod.java
@@ -0,0 +1,105 @@
+package org.apache.lucene.util;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class TestVirtualMethod extends LuceneTestCase {
+
+  private static final VirtualMethod<TestVirtualMethod> publicTestMethod =
+    new VirtualMethod<TestVirtualMethod>(TestVirtualMethod.class, "publicTest", String.class);
+  private static final VirtualMethod<TestVirtualMethod> protectedTestMethod =
+    new VirtualMethod<TestVirtualMethod>(TestVirtualMethod.class, "protectedTest", int.class);
+
+  public void publicTest(String test) {}
+  protected void protectedTest(int test) {}
+  
+  static class TestClass1 extends TestVirtualMethod {
+    @Override
+    public void publicTest(String test) {}
+    @Override
+    protected void protectedTest(int test) {}
+  }
+
+  static class TestClass2 extends TestClass1 {
+    @Override // make it public here
+    public void protectedTest(int test) {}
+  }
+
+  static class TestClass3 extends TestClass2 {
+    @Override
+    public void publicTest(String test) {}
+  }
+
+  static class TestClass4 extends TestVirtualMethod {
+  }
+
+  static class TestClass5 extends TestClass4 {
+  }
+
+  public void testGeneral() {
+    assertEquals(0, publicTestMethod.getImplementationDistance(this.getClass()));
+    assertEquals(1, publicTestMethod.getImplementationDistance(TestClass1.class));
+    assertEquals(1, publicTestMethod.getImplementationDistance(TestClass2.class));
+    assertEquals(3, publicTestMethod.getImplementationDistance(TestClass3.class));
+    assertFalse(publicTestMethod.isOverriddenAsOf(TestClass4.class));
+    assertFalse(publicTestMethod.isOverriddenAsOf(TestClass5.class));
+    
+    assertEquals(0, protectedTestMethod.getImplementationDistance(this.getClass()));
+    assertEquals(1, protectedTestMethod.getImplementationDistance(TestClass1.class));
+    assertEquals(2, protectedTestMethod.getImplementationDistance(TestClass2.class));
+    assertEquals(2, protectedTestMethod.getImplementationDistance(TestClass3.class));
+    assertFalse(protectedTestMethod.isOverriddenAsOf(TestClass4.class));
+    assertFalse(protectedTestMethod.isOverriddenAsOf(TestClass5.class));
+    
+    assertTrue(VirtualMethod.compareImplementationDistance(TestClass3.class, publicTestMethod, protectedTestMethod) > 0);
+    assertEquals(0, VirtualMethod.compareImplementationDistance(TestClass5.class, publicTestMethod, protectedTestMethod));
+  }
+
+  @SuppressWarnings("unchecked")
+  public void testExceptions() {
+    try {
+      // cast to Class to remove generics:
+      publicTestMethod.getImplementationDistance((Class) LuceneTestCase.class);
+      fail("LuceneTestCase is not a subclass and can never override publicTest(String)");
+    } catch (IllegalArgumentException arg) {
+      // pass
+    }
+    
+    try {
+      new VirtualMethod<TestVirtualMethod>(TestVirtualMethod.class, "bogus");
+      fail("Method bogus() does not exist, so IAE should be thrown");
+    } catch (IllegalArgumentException arg) {
+      // pass
+    }
+    
+    try {
+      new VirtualMethod<TestClass2>(TestClass2.class, "publicTest", String.class);
+      fail("Method publicTest(String) is not declared in TestClass2, so IAE should be thrown");
+    } catch (IllegalArgumentException arg) {
+      // pass
+    }
+
+    try {
+      // try to create a second instance of the same baseClass / method combination
+      new VirtualMethod<TestVirtualMethod>(TestVirtualMethod.class, "publicTest", String.class);
+      fail("Violating singleton status succeeded");
+    } catch (UnsupportedOperationException arg) {
+      // pass
+    }
+  }
+  
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/util/fst/TestFSTs.java b/lucene/backwards/src/test/org/apache/lucene/util/fst/TestFSTs.java
new file mode 100644
index 0000000..2695ad9
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/util/fst/TestFSTs.java
@@ -0,0 +1,1578 @@
+package org.apache.lucene.util.fst;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.OutputStreamWriter;
+import java.io.Writer;
+import java.util.*;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.TermEnum;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.FSDirectory;
+import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.store.IndexOutput;
+import org.apache.lucene.store.MockDirectoryWrapper;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.IntsRef;
+import org.apache.lucene.util.LineFileDocs;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.UnicodeUtil;
+import org.apache.lucene.util._TestUtil;
+import org.apache.lucene.util.fst.FST.Arc;
+
+public class TestFSTs extends LuceneTestCase {
+
+  private MockDirectoryWrapper dir;
+
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    dir = newDirectory();
+    dir.setPreventDoubleWrite(false);
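+    // FSTTester repeatedly saves and deletes "fst.bin" in this directory, so
+    // MockDirectoryWrapper must not treat the re-created file as a double write.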
+  }
+
+  @Override
+  public void tearDown() throws Exception {
+    dir.close();
+    super.tearDown();
+  }
+
+  private static BytesRef toBytesRef(IntsRef ir) {
+    BytesRef br = new BytesRef(ir.length);
+    for(int i=0;i<ir.length;i++) {
+      int x = ir.ints[ir.offset+i];
+      assert x >= 0 && x <= 255;
+      br.bytes[i] = (byte) x;
+    }
+    br.length = ir.length;
+    return br;
+  }
+
+  private static IntsRef toIntsRef(String s, int inputMode) {
+    return toIntsRef(s, inputMode, new IntsRef(10));
+  }
+
+  private static IntsRef toIntsRef(String s, int inputMode, IntsRef ir) {
+    if (inputMode == 0) {
+      // utf8
+      return toIntsRef(new BytesRef(s), ir);
+    } else {
+      // utf32
+      return toIntsRefUTF32(s, ir);
+    }
+  }
+
+  private static IntsRef toIntsRefUTF32(String s, IntsRef ir) {
+    final int charLength = s.length();
+    int charIdx = 0;
+    int intIdx = 0;
+    while(charIdx < charLength) {
+      if (intIdx == ir.ints.length) {
+        ir.grow(intIdx+1);
+      }
+      final int utf32 = s.codePointAt(charIdx);
+      ir.ints[intIdx] = utf32;
+      charIdx += Character.charCount(utf32);
+      intIdx++;
+    }
+    ir.length = intIdx;
+    return ir;
+  }
+
+  private static IntsRef toIntsRef(BytesRef br, IntsRef ir) {
+    if (br.length > ir.ints.length) {
+      ir.grow(br.length);
+    }
+    for(int i=0;i<br.length;i++) {
+      ir.ints[i] = br.bytes[br.offset+i]&0xFF;
+    }
+    ir.length = br.length;
+    return ir;
+  }
+
+  public void testBasicFSA() throws IOException {
+    String[] strings = new String[] {"station", "commotion", "elation", "elastic", "plastic", "stop", "ftop", "ftation", "stat"};
+    String[] strings2 = new String[] {"station", "commotion", "elation", "elastic", "plastic", "stop", "ftop", "ftation"};
+    IntsRef[] terms = new IntsRef[strings.length];
+    IntsRef[] terms2 = new IntsRef[strings2.length];
+    for(int inputMode=0;inputMode<2;inputMode++) {
+      if (VERBOSE) {
+        System.out.println("TEST: inputMode=" + inputModeToString(inputMode));
+      }
+
+      for(int idx=0;idx<strings.length;idx++) {
+        terms[idx] = toIntsRef(strings[idx], inputMode);
+      }
+      for(int idx=0;idx<strings2.length;idx++) {
+        terms2[idx] = toIntsRef(strings2[idx], inputMode);
+      }
+      Arrays.sort(terms2);
+
+      doTest(inputMode, terms);
+    
+      // Test pre-determined FST sizes to make sure we haven't lost minimality (at least on this trivial set of terms):
+
+      // FSA
+      {
+        final Outputs<Object> outputs = NoOutputs.getSingleton();
+        final Object NO_OUTPUT = outputs.getNoOutput();      
+        final List<FSTTester.InputOutput<Object>> pairs = new ArrayList<FSTTester.InputOutput<Object>>(terms2.length);
+        for(IntsRef term : terms2) {
+          pairs.add(new FSTTester.InputOutput<Object>(term, NO_OUTPUT));
+        }
+        FST<Object> fst = new FSTTester<Object>(random, dir, inputMode, pairs, outputs).doTest(0, 0, false);
+        assertNotNull(fst);
+        assertEquals(22, fst.getNodeCount());
+        assertEquals(27, fst.getArcCount());
+      }
+
+      // FST ord pos int
+      {
+        final PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton(true);
+        final List<FSTTester.InputOutput<Long>> pairs = new ArrayList<FSTTester.InputOutput<Long>>(terms2.length);
+        for(int idx=0;idx<terms2.length;idx++) {
+          pairs.add(new FSTTester.InputOutput<Long>(terms2[idx], outputs.get(idx)));
+        }
+        final FST<Long> fst = new FSTTester<Long>(random, dir, inputMode, pairs, outputs).doTest(0, 0, false);
+        assertNotNull(fst);
+        assertEquals(22, fst.getNodeCount());
+        assertEquals(27, fst.getArcCount());
+      }
+
+      // FST byte sequence ord
+      {
+        final ByteSequenceOutputs outputs = ByteSequenceOutputs.getSingleton();
+        final BytesRef NO_OUTPUT = outputs.getNoOutput();      
+        final List<FSTTester.InputOutput<BytesRef>> pairs = new ArrayList<FSTTester.InputOutput<BytesRef>>(terms2.length);
+        for(int idx=0;idx<terms2.length;idx++) {
+          final BytesRef output = random.nextInt(30) == 17 ? NO_OUTPUT : new BytesRef(Integer.toString(idx));
+          pairs.add(new FSTTester.InputOutput<BytesRef>(terms2[idx], output));
+        }
+        final FST<BytesRef> fst = new FSTTester<BytesRef>(random, dir, inputMode, pairs, outputs).doTest(0, 0, false);
+        assertNotNull(fst);
+        assertEquals(24, fst.getNodeCount());
+        assertEquals(30, fst.getArcCount());
+      }
+    }
+  }
+
+  private static String simpleRandomString(Random r) {
+    final int end = r.nextInt(10);
+    if (end == 0) {
+      // allow 0 length
+      return "";
+    }
+    final char[] buffer = new char[end];
+    for (int i = 0; i < end; i++) {
+      buffer[i] = (char) _TestUtil.nextInt(r, 97, 102);
+    }
+    return new String(buffer, 0, end);
+  }
+
+  // given set of terms, test the different outputs for them
+  private void doTest(int inputMode, IntsRef[] terms) throws IOException {
+    Arrays.sort(terms);
+
+    // NoOutputs (simple FSA)
+    {
+      final Outputs<Object> outputs = NoOutputs.getSingleton();
+      final Object NO_OUTPUT = outputs.getNoOutput();      
+      final List<FSTTester.InputOutput<Object>> pairs = new ArrayList<FSTTester.InputOutput<Object>>(terms.length);
+      for(IntsRef term : terms) {
+        pairs.add(new FSTTester.InputOutput<Object>(term, NO_OUTPUT));
+      }
+      new FSTTester<Object>(random, dir, inputMode, pairs, outputs).doTest();
+    }
+
+    // PositiveIntOutput (ord)
+    {
+      final PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton(true);
+      final List<FSTTester.InputOutput<Long>> pairs = new ArrayList<FSTTester.InputOutput<Long>>(terms.length);
+      for(int idx=0;idx<terms.length;idx++) {
+        pairs.add(new FSTTester.InputOutput<Long>(terms[idx], outputs.get(idx)));
+      }
+      new FSTTester<Long>(random, dir, inputMode, pairs, outputs).doTest();
+    }
+
+    // PositiveIntOutput (random monotonically increasing positive number)
+    {
+      final PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton(random.nextBoolean());
+      final List<FSTTester.InputOutput<Long>> pairs = new ArrayList<FSTTester.InputOutput<Long>>(terms.length);
+      long lastOutput = 0;
+      for(int idx=0;idx<terms.length;idx++) {
+        final long value = lastOutput + _TestUtil.nextInt(random, 1, 1000);
+        lastOutput = value;
+        pairs.add(new FSTTester.InputOutput<Long>(terms[idx], outputs.get(value)));
+      }
+      new FSTTester<Long>(random, dir, inputMode, pairs, outputs).doTest();
+    }
+
+    // PositiveIntOutput (random positive number)
+    {
+      final PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton(random.nextBoolean());
+      final List<FSTTester.InputOutput<Long>> pairs = new ArrayList<FSTTester.InputOutput<Long>>(terms.length);
+      for(int idx=0;idx<terms.length;idx++) {
+        pairs.add(new FSTTester.InputOutput<Long>(terms[idx], outputs.get(random.nextLong() & Long.MAX_VALUE)));
+      }
+      new FSTTester<Long>(random, dir, inputMode, pairs, outputs).doTest();
+    }
+
+    // Pair<ord, random monotonically increasing positive number>
+    {
+      final PositiveIntOutputs o1 = PositiveIntOutputs.getSingleton(random.nextBoolean());
+      final PositiveIntOutputs o2 = PositiveIntOutputs.getSingleton(random.nextBoolean());
+      final PairOutputs<Long,Long> outputs = new PairOutputs<Long,Long>(o1, o2);
+      final List<FSTTester.InputOutput<PairOutputs.Pair<Long,Long>>> pairs = new ArrayList<FSTTester.InputOutput<PairOutputs.Pair<Long,Long>>>(terms.length);
+      long lastOutput = 0;
+      for(int idx=0;idx<terms.length;idx++) {
+        final long value = lastOutput + _TestUtil.nextInt(random, 1, 1000);
+        lastOutput = value;
+        pairs.add(new FSTTester.InputOutput<PairOutputs.Pair<Long,Long>>(terms[idx],
+                                                                         outputs.get(o1.get(idx),
+                                                                                     o2.get(value))));
+      }
+      new FSTTester<PairOutputs.Pair<Long,Long>>(random, dir, inputMode, pairs, outputs).doTest();
+    }
+
+    // Sequence-of-bytes
+    {
+      final ByteSequenceOutputs outputs = ByteSequenceOutputs.getSingleton();
+      final BytesRef NO_OUTPUT = outputs.getNoOutput();      
+      final List<FSTTester.InputOutput<BytesRef>> pairs = new ArrayList<FSTTester.InputOutput<BytesRef>>(terms.length);
+      for(int idx=0;idx<terms.length;idx++) {
+        final BytesRef output = random.nextInt(30) == 17 ? NO_OUTPUT : new BytesRef(Integer.toString(idx));
+        pairs.add(new FSTTester.InputOutput<BytesRef>(terms[idx], output));
+      }
+      new FSTTester<BytesRef>(random, dir, inputMode, pairs, outputs).doTest();
+    }
+
+    // Sequence-of-ints
+    {
+      final IntSequenceOutputs outputs = IntSequenceOutputs.getSingleton();
+      final List<FSTTester.InputOutput<IntsRef>> pairs = new ArrayList<FSTTester.InputOutput<IntsRef>>(terms.length);
+      for(int idx=0;idx<terms.length;idx++) {
+        final String s = Integer.toString(idx);
+        final IntsRef output = new IntsRef(s.length());
+        output.length = s.length();
+        for(int idx2=0;idx2<output.length;idx2++) {
+          output.ints[idx2] = s.charAt(idx2);
+        }
+        pairs.add(new FSTTester.InputOutput<IntsRef>(terms[idx], output));
+      }
+      new FSTTester<IntsRef>(random, dir, inputMode, pairs, outputs).doTest();
+    }
+
+    // Up to two positive ints, shared, generally but not
+    // monotonically increasing
+    {
+      if (VERBOSE) {
+        System.out.println("TEST: now test UpToTwoPositiveIntOutputs");
+      }
+      final UpToTwoPositiveIntOutputs outputs = UpToTwoPositiveIntOutputs.getSingleton(true);
+      final List<FSTTester.InputOutput<Object>> pairs = new ArrayList<FSTTester.InputOutput<Object>>(terms.length);
+      long lastOutput = 0;
+      for(int idx=0;idx<terms.length;idx++) {
+        // Sometimes go backwards
+        long value = lastOutput + _TestUtil.nextInt(random, -100, 1000);
+        while(value < 0) {
+          value = lastOutput + _TestUtil.nextInt(random, -100, 1000);
+        }
+        final Object output;
+        if (random.nextInt(5) == 3) {
+          long value2 = lastOutput + _TestUtil.nextInt(random, -100, 1000);
+          while(value2 < 0) {
+            value2 = lastOutput + _TestUtil.nextInt(random, -100, 1000);
+          }
+          output = outputs.get(value, value2);
+        } else {
+          output = outputs.get(value);
+        }
+        pairs.add(new FSTTester.InputOutput<Object>(terms[idx], output));
+      }
+      new FSTTester<Object>(random, dir, inputMode, pairs, outputs).doTest();
+    }
+  }
+
+  private static class FSTTester<T> {
+
+    final Random random;
+    final List<InputOutput<T>> pairs;
+    final int inputMode;
+    final Outputs<T> outputs;
+    final Directory dir;
+
+    public FSTTester(Random random, Directory dir, int inputMode, List<InputOutput<T>> pairs, Outputs<T> outputs) {
+      this.random = random;
+      this.dir = dir;
+      this.inputMode = inputMode;
+      this.pairs = pairs;
+      this.outputs = outputs;
+    }
+
+    private static class InputOutput<T> implements Comparable<InputOutput<T>> {
+      public final IntsRef input;
+      public final T output;
+
+      public InputOutput(IntsRef input, T output) {
+        this.input = input;
+        this.output = output;
+      }
+
+      public int compareTo(InputOutput<T> other) {
+        if (other instanceof InputOutput) {
+          return input.compareTo((other).input);
+        } else {
+          throw new IllegalArgumentException();
+        }
+      }
+    }
+
+    public void doTest() throws IOException {
+      // no pruning
+      doTest(0, 0, true);
+
+      if (!(outputs instanceof UpToTwoPositiveIntOutputs)) {
+        // simple pruning
+        doTest(_TestUtil.nextInt(random, 1, 1+pairs.size()), 0, true);
+        
+        // leafy pruning
+        doTest(0, _TestUtil.nextInt(random, 1, 1+pairs.size()), true);
+      }
+    }
+
+    // Runs the term through the FST, returning the output, or null if the
+    // term isn't accepted.  If prefixLength is non-null it must be a
+    // length-1 int array; prefixLength[0] is set to the length of the
+    // term prefix that matches.
+    private T run(FST<T> fst, IntsRef term, int[] prefixLength) throws IOException {
+      assert prefixLength == null || prefixLength.length == 1;
+      final FST.Arc<T> arc = fst.getFirstArc(new FST.Arc<T>());
+      final T NO_OUTPUT = fst.outputs.getNoOutput();
+      T output = NO_OUTPUT;
+
+      for(int i=0;i<=term.length;i++) {
+        final int label;
+        if (i == term.length) {
+          label = FST.END_LABEL;
+        } else {
+          label = term.ints[term.offset+i];
+        }
+        //System.out.println("   loop i=" + i + " label=" + label + " output=" + fst.outputs.outputToString(output) + " curArc: target=" + arc.target + " isFinal?=" + arc.isFinal());
+        if (fst.findTargetArc(label, arc, arc) == null) {
+          if (prefixLength != null) {
+            prefixLength[0] = i;
+            return output;
+          } else {
+            return null;
+          }
+        }
+        output = fst.outputs.add(output, arc.output);
+      }
+
+      if (prefixLength != null) {
+        prefixLength[0] = term.length;
+      }
+
+      return output;
+    }
+
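+    // Walks a random path from the root, accumulating output along each arc,
+    // until it consumes an END_LABEL arc; the traversed labels are written
+    // into 'in', so the returned word is always accepted by the FST.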
+    private T randomAcceptedWord(FST<T> fst, IntsRef in) throws IOException {
+      FST.Arc<T> arc = fst.getFirstArc(new FST.Arc<T>());
+
+      final List<FST.Arc<T>> arcs = new ArrayList<FST.Arc<T>>();
+      in.length = 0;
+      in.offset = 0;
+      final T NO_OUTPUT = fst.outputs.getNoOutput();
+      T output = NO_OUTPUT;
+
+      while(true) {
+        // read all arcs:
+        fst.readFirstTargetArc(arc, arc);
+        arcs.add(new FST.Arc<T>().copyFrom(arc));
+        while(!arc.isLast()) {
+          fst.readNextArc(arc);
+          arcs.add(new FST.Arc<T>().copyFrom(arc));
+        }
+      
+        // pick one
+        arc = arcs.get(random.nextInt(arcs.size()));
+        arcs.clear();
+
+        // accumulate output
+        output = fst.outputs.add(output, arc.output);
+
+        // append label
+        if (arc.label == FST.END_LABEL) {
+          break;
+        }
+
+        if (in.ints.length == in.length) {
+          in.grow(1+in.length);
+        }
+        in.ints[in.length++] = arc.label;
+      }
+
+      return output;
+    }
+
+
+    FST<T> doTest(int prune1, int prune2, boolean allowRandomSuffixSharing) throws IOException {
+      if (VERBOSE) {
+        System.out.println("TEST: prune1=" + prune1 + " prune2=" + prune2);
+      }
+
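+      // Build the FST from the (sorted) pairs.  When the caller allows it,
+      // the suffix-sharing options are randomized so different Builder
+      // configurations get exercised across test runs.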
+      final Builder<T> builder = new Builder<T>(inputMode == 0 ? FST.INPUT_TYPE.BYTE1 : FST.INPUT_TYPE.BYTE4,
+                                                prune1, prune2,
+                                                prune1==0 && prune2==0,
+                                                allowRandomSuffixSharing ? random.nextBoolean() : true,
+                                                allowRandomSuffixSharing ? _TestUtil.nextInt(random, 1, 10) : Integer.MAX_VALUE,
+                                                outputs);
+
+      for(InputOutput<T> pair : pairs) {
+        if (pair.output instanceof UpToTwoPositiveIntOutputs.TwoLongs) {
+          final UpToTwoPositiveIntOutputs _outputs = (UpToTwoPositiveIntOutputs) outputs;
+          final UpToTwoPositiveIntOutputs.TwoLongs twoLongs = (UpToTwoPositiveIntOutputs.TwoLongs) pair.output;
+          @SuppressWarnings("unchecked") final Builder<Object> builderObject = (Builder<Object>) builder;
+          builderObject.add(pair.input, _outputs.get(twoLongs.first));
+          builderObject.add(pair.input, _outputs.get(twoLongs.second));
+        } else {
+          builder.add(pair.input, pair.output);
+        }
+      }
+      FST<T> fst = builder.finish();
+
+      if (random.nextBoolean() && fst != null) {
+        IndexOutput out = dir.createOutput("fst.bin");
+        fst.save(out);
+        out.close();
+        IndexInput in = dir.openInput("fst.bin");
+        try {
+          fst = new FST<T>(in, outputs);
+        } finally {
+          in.close();
+          dir.deleteFile("fst.bin");
+        }
+      }
+
+      if (VERBOSE && pairs.size() <= 20 && fst != null) {
+        Writer w = new OutputStreamWriter(new FileOutputStream("out.dot"), "UTF-8");
+        Util.toDot(fst, w, false, false);
+        w.close();
+        System.out.println("SAVED out.dot");
+      }
+
+      if (VERBOSE) {
+        if (fst == null) {
+          System.out.println("  fst has 0 nodes (fully pruned)");
+        } else {
+          System.out.println("  fst has " + fst.getNodeCount() + " nodes and " + fst.getArcCount() + " arcs");
+        }
+      }
+
+      if (prune1 == 0 && prune2 == 0) {
+        verifyUnPruned(inputMode, fst);
+      } else {
+        verifyPruned(inputMode, fst, prune1, prune2);
+      }
+
+      return fst;
+    }
+
+    // FST is complete
+    private void verifyUnPruned(int inputMode, FST<T> fst) throws IOException {
+
+      if (pairs.size() == 0) {
+        assertNull(fst);
+        return;
+      }
+
+      if (VERBOSE) {
+        System.out.println("TEST: now verify " + pairs.size() + " terms");
+        for(InputOutput<T> pair : pairs) {
+          assertNotNull(pair);
+          assertNotNull(pair.input);
+          assertNotNull(pair.output);
+          System.out.println("  " + inputToString(inputMode, pair.input) + ": " + outputs.outputToString(pair.output));
+        }
+      }
+
+      assertNotNull(fst);
+
+      // visit valid pairs in order -- make sure all words
+      // are accepted, and FSTEnum's next() steps through
+      // them correctly
+      if (VERBOSE) {
+        System.out.println("TEST: check valid terms/next()");
+      }
+      {
+        IntsRefFSTEnum<T> fstEnum = new IntsRefFSTEnum<T>(fst);
+        for(InputOutput<T> pair : pairs) {
+          IntsRef term = pair.input;
+          if (VERBOSE) {
+            System.out.println("TEST: check term=" + inputToString(inputMode, term) + " output=" + fst.outputs.outputToString(pair.output));
+          }
+          Object output = run(fst, term, null);
+
+          assertNotNull("term " + inputToString(inputMode, term) + " is not accepted", output);
+          assertEquals(pair.output, output);
+
+          // verify enum's next
+          IntsRefFSTEnum.InputOutput<T> t = fstEnum.next();
+          assertNotNull(t);
+          assertEquals("expected input=" + inputToString(inputMode, term) + " but fstEnum returned " + inputToString(inputMode, t.input), term, t.input);
+          assertEquals(pair.output, t.output);
+        }
+        assertNull(fstEnum.next());
+      }
+
+      final Map<IntsRef,T> termsMap = new HashMap<IntsRef,T>();
+      for(InputOutput<T> pair : pairs) {
+        termsMap.put(pair.input, pair.output);
+      }
+
+      // find random matching word and make sure it's valid
+      if (VERBOSE) {
+        System.out.println("TEST: verify random accepted terms");
+      }
+      final IntsRef scratch = new IntsRef(10);
+      int num = atLeast(500);
+      for(int iter=0;iter<num;iter++) {
+        T output = randomAcceptedWord(fst, scratch);
+        assertTrue("accepted word " + inputToString(inputMode, scratch) + " is not valid", termsMap.containsKey(scratch));
+        assertEquals(termsMap.get(scratch), output);
+      }
+    
+      // test IntsRefFSTEnum.seek:
+      if (VERBOSE) {
+        System.out.println("TEST: verify seek");
+      }
+      IntsRefFSTEnum<T> fstEnum = new IntsRefFSTEnum<T>(fst);
+      num = atLeast(100);
+      for(int iter=0;iter<num;iter++) {
+        if (VERBOSE) {
+          System.out.println("TEST: iter=" + iter);
+        }
+        if (random.nextBoolean()) {
+          // seek to term that doesn't exist:
+          while(true) {
+            final IntsRef term = toIntsRef(getRandomString(), inputMode);
+            int pos = Collections.binarySearch(pairs, new InputOutput<T>(term, null));
+            if (pos < 0) {
+              pos = -(pos+1);
+              // ok doesn't exist
+              //System.out.println("  seek " + inputToString(inputMode, term));
+              final IntsRefFSTEnum.InputOutput<T> seekResult;
+              if (random.nextBoolean()) {
+                if (VERBOSE) {
+                  System.out.println("  do non-exist seekFloor term=" + inputToString(inputMode, term));
+                }
+                seekResult = fstEnum.seekFloor(term);
+                pos--;
+              } else {
+                if (VERBOSE) {
+                  System.out.println("  do non-exist seekCeil term=" + inputToString(inputMode, term));
+                }
+                seekResult = fstEnum.seekCeil(term);
+              }
+
+              if (pos != -1 && pos < pairs.size()) {
+                //System.out.println("    got " + inputToString(inputMode,seekResult.input) + " output=" + fst.outputs.outputToString(seekResult.output));
+                assertNotNull("got null but expected term=" + inputToString(inputMode, pairs.get(pos).input), seekResult);
+                if (VERBOSE) {
+                  System.out.println("    got " + inputToString(inputMode, seekResult.input));
+                }
+                assertEquals("expected " + inputToString(inputMode, pairs.get(pos).input) + " but got " + inputToString(inputMode, seekResult.input), pairs.get(pos).input, seekResult.input);
+                assertEquals(pairs.get(pos).output, seekResult.output);
+              } else {
+                // seeked before start or beyond end
+                //System.out.println("seek=" + seekTerm);
+                assertNull("expected null but got " + (seekResult==null ? "null" : inputToString(inputMode, seekResult.input)), seekResult);
+                if (VERBOSE) {
+                  System.out.println("    got null");
+                }
+              }
+
+              break;
+            }
+          }
+        } else {
+          // seek to term that does exist:
+          InputOutput<T> pair = pairs.get(random.nextInt(pairs.size()));
+          final IntsRefFSTEnum.InputOutput<T> seekResult;
+          if (random.nextBoolean()) {
+            if (VERBOSE) {
+              System.out.println("  do exists seekFloor " + inputToString(inputMode, pair.input));
+            }
+            seekResult = fstEnum.seekFloor(pair.input);
+          } else {
+            if (VERBOSE) {
+              System.out.println("  do exists seekCeil " + inputToString(inputMode, pair.input));
+            }
+            seekResult = fstEnum.seekCeil(pair.input);
+          }
+          assertNotNull(seekResult);
+          assertEquals("got " + inputToString(inputMode, seekResult.input) + " but expected " + inputToString(inputMode, pair.input), pair.input, seekResult.input);
+          assertEquals(pair.output, seekResult.output);
+        }
+      }
+
+      if (VERBOSE) {
+        System.out.println("TEST: mixed next/seek");
+      }
+
+      // test mixed next/seek
+      num = atLeast(100);
+      for(int iter=0;iter<num;iter++) {
+        if (VERBOSE) {
+          System.out.println("TEST: iter " + iter);
+        }
+        // reset:
+        fstEnum = new IntsRefFSTEnum<T>(fst);
+        int upto = -1;
+        while(true) {
+          boolean isDone = false;
+          if (upto == pairs.size()-1 || random.nextBoolean()) {
+            // next
+            upto++;
+            if (VERBOSE) {
+              System.out.println("  do next");
+            }
+            isDone = fstEnum.next() == null;
+          } else if (upto != -1 && upto < 0.75 * pairs.size() && random.nextBoolean()) {
+            int attempt = 0;
+            for(;attempt<10;attempt++) {
+              IntsRef term = toIntsRef(getRandomString(), inputMode);
+              if (!termsMap.containsKey(term) && term.compareTo(pairs.get(upto).input) > 0) {
+                int pos = Collections.binarySearch(pairs, new InputOutput<T>(term, null));
+                assert pos < 0;
+                upto = -(pos+1);
+
+                if (random.nextBoolean()) {
+                  upto--;
+                  assertTrue(upto != -1);
+                  if (VERBOSE) {
+                    System.out.println("  do non-exist seekFloor(" + inputToString(inputMode, term) + ")");
+                  }
+                  isDone = fstEnum.seekFloor(term) == null;
+                } else {
+                  if (VERBOSE) {
+                    System.out.println("  do non-exist seekCeil(" + inputToString(inputMode, term) + ")");
+                  }
+                  isDone = fstEnum.seekCeil(term) == null;
+                }
+
+                break;
+              }
+            }
+            if (attempt == 10) {
+              continue;
+            }
+            
+          } else {
+            final int inc = random.nextInt(pairs.size() - upto - 1);
+            upto += inc;
+            if (upto == -1) {
+              upto = 0;
+            }
+
+            if (random.nextBoolean()) {
+              if (VERBOSE) {
+                System.out.println("  do advanceCeil(" + inputToString(inputMode, pairs.get(upto).input) + ")");
+              }
+              isDone = fstEnum.seekCeil(pairs.get(upto).input) == null;
+            } else {
+              if (VERBOSE) {
+                System.out.println("  do advanceFloor(" + inputToString(inputMode, pairs.get(upto).input) + ")");
+              }
+              isDone = fstEnum.seekFloor(pairs.get(upto).input) == null;
+            }
+          }
+          if (VERBOSE) {
+            if (!isDone) {
+              System.out.println("    got " + inputToString(inputMode, fstEnum.current().input));
+            } else {
+              System.out.println("    got null");
+            }
+          }
+
+          if (upto == pairs.size()) {
+            assertTrue(isDone);
+            break;
+          } else {
+            assertFalse(isDone);
+            assertEquals(pairs.get(upto).input, fstEnum.current().input);
+            assertEquals(pairs.get(upto).output, fstEnum.current().output);
+
+            /*
+            if (upto < pairs.size()-1) {
+              int tryCount = 0;
+              while(tryCount < 10) {
+                final IntsRef t = toIntsRef(getRandomString(), inputMode);
+                if (pairs.get(upto).input.compareTo(t) < 0) {
+                  final boolean expected = t.compareTo(pairs.get(upto+1).input) < 0;
+                  if (VERBOSE) {
+                    System.out.println("TEST: call beforeNext(" + inputToString(inputMode, t) + "); current=" + inputToString(inputMode, pairs.get(upto).input) + " next=" + inputToString(inputMode, pairs.get(upto+1).input) + " expected=" + expected);
+                  }
+                  assertEquals(expected, fstEnum.beforeNext(t));
+                  break;
+                }
+                tryCount++;
+              }
+            }
+            */
+          }
+        }
+      }
+    }
+
+    private static class CountMinOutput<T> {
+      int count;
+      T output;
+      T finalOutput;
+      boolean isLeaf = true;
+      boolean isFinal;
+    }
+
+    // FST is pruned
+    private void verifyPruned(int inputMode, FST<T> fst, int prune1, int prune2) throws IOException {
+
+      if (VERBOSE) {
+        System.out.println("TEST: now verify pruned " + pairs.size() + " terms; outputs=" + outputs);
+        for(InputOutput<T> pair : pairs) {
+          System.out.println("  " + inputToString(inputMode, pair.input) + ": " + outputs.outputToString(pair.output));
+        }
+      }
+
+      // To validate the FST, we brute-force compute all prefixes
+      // in the terms, matched to their "common" outputs, prune that
+      // set according to the prune thresholds, then assert the FST
+      // matches that same set.
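+      // (For example, if the pairs were only the terms "stat" and "station",
+      // the prefix map would hold "", "s", "st", "sta" and "stat" with count 2
+      // and the common output of both terms, plus "stati" .. "station" with
+      // count 1; "stat" and "station" would additionally be marked final.)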
+
+      // NOTE: Crazy RAM intensive!!
+
+      //System.out.println("TEST: tally prefixes");
+
+      // build all prefixes
+      final Map<IntsRef,CountMinOutput<T>> prefixes = new HashMap<IntsRef,CountMinOutput<T>>();
+      final IntsRef scratch = new IntsRef(10);
+      for(InputOutput<T> pair: pairs) {
+        scratch.copy(pair.input);
+        for(int idx=0;idx<=pair.input.length;idx++) {
+          scratch.length = idx;
+          CountMinOutput<T> cmo = prefixes.get(scratch);
+          if (cmo == null) {
+            cmo = new CountMinOutput<T>();
+            cmo.count = 1;
+            cmo.output = pair.output;
+            prefixes.put(new IntsRef(scratch), cmo);
+          } else {
+            cmo.count++;
+            cmo.output = outputs.common(cmo.output, pair.output);
+          }
+          if (idx == pair.input.length) {
+            cmo.isFinal = true;
+            cmo.finalOutput = cmo.output;
+          }
+        }
+      }
+
+      if (VERBOSE) {
+        System.out.println("TEST: now prune");
+      }
+
+      // prune 'em
+      final Iterator<Map.Entry<IntsRef,CountMinOutput<T>>> it = prefixes.entrySet().iterator();
+      while(it.hasNext()) {
+        Map.Entry<IntsRef,CountMinOutput<T>> ent = it.next();
+        final IntsRef prefix = ent.getKey();
+        final CountMinOutput<T> cmo = ent.getValue();
+        if (VERBOSE) {
+          System.out.println("  term prefix=" + inputToString(inputMode, prefix, false) + " count=" + cmo.count + " isLeaf=" + cmo.isLeaf + " output=" + outputs.outputToString(cmo.output) + " isFinal=" + cmo.isFinal);
+        }
+        final boolean keep;
+        if (prune1 > 0) {
+          keep = cmo.count >= prune1;
+        } else {
+          assert prune2 > 0;
+          if (prune2 > 1 && cmo.count >= prune2) {
+            keep = true;
+          } else if (prefix.length > 0) {
+            // consult our parent
+            scratch.length = prefix.length-1;
+            System.arraycopy(prefix.ints, prefix.offset, scratch.ints, 0, scratch.length);
+            final CountMinOutput<T> cmo2 = prefixes.get(scratch);
+            //System.out.println("    parent count = " + (cmo2 == null ? -1 : cmo2.count));
+            keep = cmo2 != null && ((prune2 > 1 && cmo2.count >= prune2) || (prune2 == 1 && (cmo2.count >= 2 || prefix.length <= 1)));
+          } else if (cmo.count >= prune2) {
+            keep = true;
+          } else {
+            keep = false;
+          }
+        }
+
+        if (!keep) {
+          it.remove();
+          //System.out.println("    remove");
+        } else {
+          // clear isLeaf for all ancestors
+          //System.out.println("    keep");
+          scratch.copy(prefix);
+          scratch.length--;
+          while(scratch.length >= 0) {
+            final CountMinOutput<T> cmo2 = prefixes.get(scratch);
+            if (cmo2 != null) {
+              //System.out.println("    clear isLeaf " + inputToString(inputMode, scratch));
+              cmo2.isLeaf = false;
+            }
+            scratch.length--;
+          }
+        }
+      }
+
+      //System.out.println("TEST: after prune");
+      /*
+        for(Map.Entry<BytesRef,CountMinOutput> ent : prefixes.entrySet()) {
+        System.out.println("  " + inputToString(inputMode, ent.getKey()) + ": isLeaf=" + ent.getValue().isLeaf + " isFinal=" + ent.getValue().isFinal);
+        if (ent.getValue().isFinal) {
+        System.out.println("    finalOutput=" + outputs.outputToString(ent.getValue().finalOutput));
+        }
+        }
+      */
+
+      if (prefixes.size() <= 1) {
+        assertNull(fst);
+        return;
+      }
+
+      assertNotNull(fst);
+
+      // make sure FST only enums valid prefixes
+      if (VERBOSE) {
+        System.out.println("TEST: check pruned enum");
+      }
+      IntsRefFSTEnum<T> fstEnum = new IntsRefFSTEnum<T>(fst);
+      IntsRefFSTEnum.InputOutput<T> current;
+      while((current = fstEnum.next()) != null) {
+        if (VERBOSE) {
+          System.out.println("  fstEnum.next prefix=" + inputToString(inputMode, current.input, false) + " output=" + outputs.outputToString(current.output));
+        }
+        final CountMinOutput<T> cmo = prefixes.get(current.input);
+        assertNotNull(cmo);
+        assertTrue(cmo.isLeaf || cmo.isFinal);
+        //if (cmo.isFinal && !cmo.isLeaf) {
+        if (cmo.isFinal) {
+          assertEquals(cmo.finalOutput, current.output);
+        } else {
+          assertEquals(cmo.output, current.output);
+        }
+      }
+
+      // make sure all non-pruned prefixes are present in the FST
+      if (VERBOSE) {
+        System.out.println("TEST: verify all prefixes");
+      }
+      final int[] stopNode = new int[1];
+      for(Map.Entry<IntsRef,CountMinOutput<T>> ent : prefixes.entrySet()) {
+        if (ent.getKey().length > 0) {
+          final CountMinOutput<T> cmo = ent.getValue();
+          final T output = run(fst, ent.getKey(), stopNode);
+          if (VERBOSE) {
+            System.out.println("TEST: verify prefix=" + inputToString(inputMode, ent.getKey(), false) + " output=" + outputs.outputToString(cmo.output));
+          }
+          // if (cmo.isFinal && !cmo.isLeaf) {
+          if (cmo.isFinal) {
+            assertEquals(cmo.finalOutput, output);
+          } else {
+            assertEquals(cmo.output, output);
+          }
+          assertEquals(ent.getKey().length, stopNode[0]);
+        }
+      }
+    }
+  }
+
+  public void testRandomWords() throws IOException {
+    testRandomWords(1000, atLeast(2));
+    //testRandomWords(20, 100);
+  }
+
+  private String inputModeToString(int mode) {
+    if (mode == 0) {
+      return "utf8";
+    } else {
+      return "utf32";
+    }
+  }
+
+  private void testRandomWords(int maxNumWords, int numIter) throws IOException {
+    for(int iter=0;iter<numIter;iter++) {
+      if (VERBOSE) {
+        System.out.println("\nTEST: iter " + iter);
+      }
+      for(int inputMode=0;inputMode<2;inputMode++) {
+        final int numWords = random.nextInt(maxNumWords+1);
+        Set<IntsRef> termsSet = new HashSet<IntsRef>();
+        IntsRef[] terms = new IntsRef[numWords];
+        while(termsSet.size() < numWords) {
+          final String term = getRandomString();
+          termsSet.add(toIntsRef(term, inputMode));
+        }
+        doTest(inputMode, termsSet.toArray(new IntsRef[termsSet.size()]));
+      }
+    }
+  }
+
+  static String getRandomString() {
+    final String term;
+    if (random.nextBoolean()) {
+      term = _TestUtil.randomRealisticUnicodeString(random);
+    } else {
+      // we want to mix in limited-alphabet symbols so
+      // we get more sharing of the nodes given how few
+      // terms we are testing...
+      term = simpleRandomString(random);
+    }
+    return term;
+  }
+
+  @Nightly
+  public void testBigSet() throws IOException {
+    testRandomWords(_TestUtil.nextInt(random, 50000, 60000), 1);
+  }
+  
+  private static String inputToString(int inputMode, IntsRef term) {
+    return inputToString(inputMode, term, true);
+  }
+
+  private static String inputToString(int inputMode, IntsRef term, boolean isValidUnicode) {
+    if (!isValidUnicode) {
+      return term.toString();
+    } else if (inputMode == 0) {
+      // utf8
+      return toBytesRef(term).utf8ToString() + " " + term;
+    } else {
+      // utf32
+      return UnicodeUtil.newString(term.ints, term.offset, term.length) + " " + term;
+    }
+  }
+
+  private static IntsRef toIntsRef(String s) {
+    final int charCount = s.length();
+    IntsRef ir = new IntsRef(charCount);
+    for(int charIDX=0;charIDX<charCount;charIDX++) {
+      ir.ints[charIDX] = s.charAt(charIDX);
+    }
+    ir.length = charCount;
+    return ir;
+  }
+
+  private static String toString(IntsRef ints) {
+    char[] chars = new char[ints.length];
+    for(int charIDX=0;charIDX<ints.length;charIDX++) {
+      final int ch = ints.ints[ints.offset+charIDX];
+      assertTrue(ch >= 0 && ch < 65536);
+      chars[charIDX] = (char) ch;
+    }
+    return new String(chars);
+  }
+
+  // Build FST for all unique terms in the test line docs
+  // file, up until a time limit
+  public void testRealTerms() throws Exception {
+
+    /*
+    if (CodecProvider.getDefault().getDefaultFieldCodec().equals("SimpleText")) {
+      // no
+      CodecProvider.getDefault().setDefaultFieldCodec("Standard");
+    }
+    */
+
+    final LineFileDocs docs = new LineFileDocs(random);
+    final int RUN_TIME_MSEC = atLeast(500);
+    final IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(-1).setRAMBufferSizeMB(64);
+    final File tempDir = _TestUtil.getTempDir("fstlines");
+    final MockDirectoryWrapper dir = newFSDirectory(tempDir);
+    final IndexWriter writer = new IndexWriter(dir, conf);
+    writer.setInfoStream(VERBOSE ? System.out : null);
+    final long stopTime = System.currentTimeMillis() + RUN_TIME_MSEC;
+    Document doc;
+    int docCount = 0;
+    while((doc = docs.nextDoc()) != null && System.currentTimeMillis() < stopTime) {
+      writer.addDocument(doc);
+      docCount++;
+    }
+    IndexReader r = IndexReader.open(writer, true);
+    writer.close();
+    final PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton(random.nextBoolean());
+    Builder<Long> builder = new Builder<Long>(FST.INPUT_TYPE.BYTE2, outputs);
+
+    boolean storeOrd = false;
+    if (VERBOSE) {
+      if (storeOrd) {
+        System.out.println("FST stores ord");
+      } else {
+        System.out.println("FST stores docFreq");
+      }
+    }
+    TermEnum termEnum = r.terms(new Term("body", ""));
+    if (VERBOSE) {
+      System.out.println("TEST: got termEnum=" + termEnum);
+    }
+    int ord = 0;
+    while(true) {
+      final Term term = termEnum.term();
+      if (term == null || !"body".equals(term.field())) {
+        break;
+      }
+
+      // No ord in 3.x:
+      /*
+      if (ord == 0) {
+        try {
+          termsEnum.ord();
+        } catch (UnsupportedOperationException uoe) {
+          if (VERBOSE) {
+            System.out.println("TEST: codec doesn't support ord; FST stores docFreq");
+          }
+          storeOrd = false;
+        }
+      }
+      */
+      final int output;
+      if (storeOrd) {
+        output = ord;
+      } else {
+        output = termEnum.docFreq();
+      }
+      //System.out.println("ADD: " + term.text() + " ch[0]=" + (term.text().length() == 0 ? -1 : term.text().charAt(0)));
+      builder.add(toIntsRef(term.text()), outputs.get(output));
+      ord++;
+      if (VERBOSE && ord % 100000 == 0 && LuceneTestCase.TEST_NIGHTLY) {
+        System.out.println(ord + " terms...");
+      }
+      termEnum.next();
+    }
+    final FST<Long> fst = builder.finish();
+    if (VERBOSE) {
+      System.out.println("FST: " + docCount + " docs; " + ord + " terms; " + fst.getNodeCount() + " nodes; " + fst.getArcCount() + " arcs;" + " " + fst.sizeInBytes() + " bytes");
+    }
+
+    if (ord > 0) {
+      // Now confirm IntsRefFSTEnum and TermEnum act the
+      // same:
+      final IntsRefFSTEnum<Long> fstEnum = new IntsRefFSTEnum<Long>(fst);
+      int num = atLeast(1000);
+      for(int iter=0;iter<num;iter++) {
+        final String randomTerm = getRandomString();
+
+        if (VERBOSE) {
+          System.out.println("TEST: seek " + randomTerm + " ch[0]=" + (randomTerm.length() == 0 ? -1 : randomTerm.charAt(0)));
+        }
+
+        termEnum = r.terms(new Term("body", randomTerm));
+        final IntsRefFSTEnum.InputOutput<Long> fstSeekResult = fstEnum.seekCeil(toIntsRef(randomTerm));
+
+        if (termEnum.term() == null || !"body".equals(termEnum.term().field())) {
+          assertNull("got " + (fstSeekResult == null ? "null" : toString(fstSeekResult.input)) + " but expected null", fstSeekResult);
+        } else {
+          assertSame(termEnum, fstEnum, storeOrd);
+          for(int nextIter=0;nextIter<10;nextIter++) {
+            if (VERBOSE) {
+              System.out.println("TEST: next");
+              //if (storeOrd) {
+              //System.out.println("  ord=" + termEnum.ord());
+              //}
+            }
+            termEnum.next();
+            if (termEnum.term() != null && "body".equals(termEnum.term().field())) {
+              if (VERBOSE) {
+                System.out.println("  term=" + termEnum.term());
+              }
+              assertNotNull(fstEnum.next());
+              assertSame(termEnum, fstEnum, storeOrd);
+            } else {
+              if (VERBOSE) {
+                System.out.println("  end!");
+              }
+              IntsRefFSTEnum.InputOutput<Long> nextResult = fstEnum.next();
+              if (nextResult != null) {
+                System.out.println("expected null but got: input=" + toString(nextResult.input) + " output=" + outputs.outputToString(nextResult.output));
+                fail();
+              }
+              break;
+            }
+          }
+        }
+      }
+    }
+
+    r.close();
+    dir.close();
+  }
+
+  private void assertSame(TermEnum termEnum, IntsRefFSTEnum fstEnum, boolean storeOrd) throws Exception {
+    if (termEnum.term() == null || !"body".equals(termEnum.term().field())) {
+      if (fstEnum.current() != null) {
+        fail("fstEnum.current().input=" + toString(fstEnum.current().input));
+      }
+    } else {
+      assertNotNull(fstEnum.current());
+      assertEquals(termEnum.term() + " != " + toString(fstEnum.current().input), termEnum.term().text(), toString(fstEnum.current().input));
+      if (storeOrd) {
+        // fst stored the ord
+        // No ord in 3.x
+        // assertEquals(termEnum.ord(), ((Long) fstEnum.current().output).longValue());
+      } else {
+        // fst stored the docFreq
+        assertEquals(termEnum.docFreq(), (int) (((Long) fstEnum.current().output).longValue()));
+      }
+    }
+  }
+
+  private static abstract class VisitTerms<T> {
+    private final String dirOut;
+    private final String wordsFileIn;
+    private int inputMode;
+    private final Outputs<T> outputs;
+    private final Builder<T> builder;
+
+    public VisitTerms(String dirOut, String wordsFileIn, int inputMode, int prune, Outputs<T> outputs) {
+      this.dirOut = dirOut;
+      this.wordsFileIn = wordsFileIn;
+      this.inputMode = inputMode;
+      this.outputs = outputs;
+      
+      builder = new Builder<T>(inputMode == 0 ? FST.INPUT_TYPE.BYTE1 : FST.INPUT_TYPE.BYTE4, 0, prune, prune == 0, true, Integer.MAX_VALUE, outputs);
+    }
+
+    protected abstract T getOutput(IntsRef input, int ord) throws IOException;
+
+    public void run(int limit, boolean verify) throws IOException {
+      BufferedReader is = new BufferedReader(new InputStreamReader(new FileInputStream(wordsFileIn), "UTF-8"), 65536);
+      try {
+        final IntsRef intsRef = new IntsRef(10);
+        long tStart = System.currentTimeMillis();
+        int ord = 0;
+        while(true) {
+          String w = is.readLine();
+          if (w == null) {
+            break;
+          }
+          toIntsRef(w, inputMode, intsRef);
+          builder.add(intsRef,
+                      getOutput(intsRef, ord));
+
+          ord++;
+          if (ord % 500000 == 0) {
+            System.out.println(
+                String.format(Locale.ENGLISH, 
+                    "%6.2fs: %9d...", ((System.currentTimeMillis() - tStart) / 1000.0), ord));
+          }
+          if (ord >= limit) {
+            break;
+          }
+        }
+
+        assert builder.getTermCount() == ord;
+        final FST<T> fst = builder.finish();
+        if (fst == null) {
+          System.out.println("FST was fully pruned!");
+          System.exit(0);
+        }
+
+        if (dirOut == null)
+          return;
+
+        System.out.println(ord + " terms; " + fst.getNodeCount() + " nodes; " + fst.getArcCount() + " arcs; " + fst.getArcWithOutputCount() + " arcs w/ output; tot size " + fst.sizeInBytes());
+        if (fst.getNodeCount() < 100) {
+          Writer w = new OutputStreamWriter(new FileOutputStream("out.dot"), "UTF-8");
+          Util.toDot(fst, w, false, false);
+          w.close();
+          System.out.println("Wrote FST to out.dot");
+        }
+
+        Directory dir = FSDirectory.open(new File(dirOut));
+        IndexOutput out = dir.createOutput("fst.bin");
+        fst.save(out);
+        out.close();
+
+        System.out.println("Saved FST to fst.bin.");
+
+        if (!verify) {
+          return;
+        }
+
+        System.out.println("\nNow verify...");
+
+        is.close();
+        is = new BufferedReader(new InputStreamReader(new FileInputStream(wordsFileIn), "UTF-8"), 65536);
+
+        ord = 0;
+        tStart = System.currentTimeMillis();
+        while(true) {
+          String w = is.readLine();
+          if (w == null) {
+            break;
+          }
+          toIntsRef(w, inputMode, intsRef);
+          T expected = getOutput(intsRef, ord);
+          T actual = Util.get(fst, intsRef);
+          if (actual == null) {
+            throw new RuntimeException("unexpected null output on input=" + w);
+          }
+          if (!actual.equals(expected)) {
+            throw new RuntimeException("wrong output (got " + outputs.outputToString(actual) + " but expected " + outputs.outputToString(expected) + ") on input=" + w);
+          }
+
+          ord++;
+          if (ord % 500000 == 0) {
+            System.out.println(((System.currentTimeMillis()-tStart)/1000.0) + "s: " + ord + "...");
+          }
+          if (ord >= limit) {
+            break;
+          }
+        }
+
+        double totSec = ((System.currentTimeMillis() - tStart)/1000.0);
+        System.out.println("Verify took " + totSec + " sec (" + (int) ((totSec*1000000000/ord)) + " nsec per lookup)");
+
+      } finally {
+        is.close();
+      }
+    }
+  }
+
+  // java -cp build/classes/test:build/classes/java:build/classes/test-framework:lib/junit-4.7.jar org.apache.lucene.util.fst.TestFSTs /x/tmp/allTerms3.txt out
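+  // Supported options (parsed below): -prune N, -limit N, -utf8 | -utf32,
+  // -docFreq, -ords, -noverify, followed by the words file and an optional
+  // output directory, e.g. (hypothetical paths):
+  //   ... org.apache.lucene.util.fst.TestFSTs -ords -limit 1000000 /x/tmp/allTerms3.txt /tmp/fstout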
+  public static void main(String[] args) throws IOException {
+    int prune = 0;
+    int limit = Integer.MAX_VALUE;
+    int inputMode = 0;                             // utf8
+    boolean storeOrds = false;
+    boolean storeDocFreqs = false;
+    boolean verify = true;
+    
+    String wordsFileIn = null;
+    String dirOut = null;
+
+    int idx = 0;
+    while (idx < args.length) {
+      if (args[idx].equals("-prune")) {
+        prune = Integer.valueOf(args[1 + idx]);
+        idx++;
+      } else if (args[idx].equals("-limit")) {
+        limit = Integer.valueOf(args[1 + idx]);
+        idx++;
+      } else if (args[idx].equals("-utf8")) {
+        inputMode = 0;
+      } else if (args[idx].equals("-utf32")) {
+        inputMode = 1;
+      } else if (args[idx].equals("-docFreq")) {
+        storeDocFreqs = true;
+      } else if (args[idx].equals("-ords")) {
+        storeOrds = true;
+      } else if (args[idx].equals("-noverify")) {
+        verify = false;
+      } else if (args[idx].startsWith("-")) {
+        System.err.println("Unrecognized option: " + args[idx]);
+        System.exit(-1);
+      } else {
+        if (wordsFileIn == null) {
+          wordsFileIn = args[idx];
+        } else if (dirOut == null) {
+          dirOut = args[idx];
+        } else {
+          System.err.println("Too many arguments, expected: input [output]");
+          System.exit(-1);
+        }
+      }
+      idx++;
+    }
+    
+    if (wordsFileIn == null) {
+      System.err.println("No input file.");
+      System.exit(-1);
+    }
+
+    // ords benefit from output sharing (they increase monotonically); docFreqs don't:
+
+    if (storeOrds && storeDocFreqs) {
+      // Store both ord & docFreq:
+      final PositiveIntOutputs o1 = PositiveIntOutputs.getSingleton(true);
+      final PositiveIntOutputs o2 = PositiveIntOutputs.getSingleton(false);
+      final PairOutputs<Long,Long> outputs = new PairOutputs<Long,Long>(o1, o2);
+      new VisitTerms<PairOutputs.Pair<Long,Long>>(dirOut, wordsFileIn, inputMode, prune, outputs) {
+        Random rand;
+        @Override
+        public PairOutputs.Pair<Long,Long> getOutput(IntsRef input, int ord) {
+          if (ord == 0) {
+            rand = new Random(17);
+          }
+          return new PairOutputs.Pair<Long,Long>(o1.get(ord),
+                                                 o2.get(_TestUtil.nextInt(rand, 1, 5000)));
+        }
+      }.run(limit, verify);
+    } else if (storeOrds) {
+      // Store only ords
+      final PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton(true);
+      new VisitTerms<Long>(dirOut, wordsFileIn, inputMode, prune, outputs) {
+        @Override
+        public Long getOutput(IntsRef input, int ord) {
+          return outputs.get(ord);
+        }
+      }.run(limit, verify);
+    } else if (storeDocFreqs) {
+      // Store only docFreq
+      final PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton(false);
+      new VisitTerms<Long>(dirOut, wordsFileIn, inputMode, prune, outputs) {
+        Random rand;
+        @Override
+        public Long getOutput(IntsRef input, int ord) {
+          if (ord == 0) {
+            rand = new Random(17);
+          }
+          return outputs.get(_TestUtil.nextInt(rand, 1, 5000));
+        }
+      }.run(limit, verify);
+    } else {
+      // Store nothing
+      final NoOutputs outputs = NoOutputs.getSingleton();
+      final Object NO_OUTPUT = outputs.getNoOutput();
+      new VisitTerms<Object>(dirOut, wordsFileIn, inputMode, prune, outputs) {
+        @Override
+        public Object getOutput(IntsRef input, int ord) {
+          return NO_OUTPUT;
+        }
+      }.run(limit, verify);
+    }
+  }
+
+  public void testSingleString() throws Exception {
+    final Outputs<Object> outputs = NoOutputs.getSingleton();
+    final Builder<Object> b = new Builder<Object>(FST.INPUT_TYPE.BYTE1, outputs);
+    b.add(new BytesRef("foobar"), outputs.getNoOutput());
+    final BytesRefFSTEnum<Object> fstEnum = new BytesRefFSTEnum<Object>(b.finish());
+    assertNull(fstEnum.seekFloor(new BytesRef("foo")));
+    assertNull(fstEnum.seekCeil(new BytesRef("foobaz")));
+  }
+
+  public void testSimple() throws Exception {
+
+    // Get outputs -- passing true means FST will share
+    // (delta code) the outputs.  This should result in
+    // smaller FST if the outputs grow monotonically.  But
+    // if numbers are "random", false should give smaller
+    // final size:
+    final PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton(true);
+
+    // Build an FST mapping BytesRef -> Long
+    final Builder<Long> builder = new Builder<Long>(FST.INPUT_TYPE.BYTE1, outputs);
+
+    final BytesRef a = new BytesRef("a");
+    final BytesRef b = new BytesRef("b");
+    final BytesRef c = new BytesRef("c");
+
+    builder.add(a, outputs.get(17));
+    builder.add(b, outputs.get(42));
+    builder.add(c, outputs.get(13824324872317238L));
+
+    final FST<Long> fst = builder.finish();
+
+    assertEquals(13824324872317238L, (long) Util.get(fst, c));
+    assertEquals(42, (long) Util.get(fst, b));
+    assertEquals(17, (long) Util.get(fst, a));
+
+    BytesRefFSTEnum<Long> fstEnum = new BytesRefFSTEnum<Long>(fst);
+    BytesRefFSTEnum.InputOutput<Long> seekResult;
+    seekResult = fstEnum.seekFloor(a);
+    assertNotNull(seekResult);
+    assertEquals(17, (long) seekResult.output);
+
+    // goes to a
+    seekResult = fstEnum.seekFloor(new BytesRef("aa"));
+    assertNotNull(seekResult);
+    assertEquals(17, (long) seekResult.output);
+
+    // goes to b
+    seekResult = fstEnum.seekCeil(new BytesRef("aa"));
+    assertNotNull(seekResult);
+    assertEquals(b, seekResult.input);
+    assertEquals(42, (long) seekResult.output);
+  }
+
+  /**
+   * Test state expansion (array format) on close-to-root states. Creates
+   * synthetic input that has one expanded state on each level.
+   * 
+   * @see "https://issues.apache.org/jira/browse/LUCENE-2933" 
+   */
+  public void testExpandedCloseToRoot() throws Exception {
+    class SyntheticData {
+      FST<Object> compile(String[] lines) throws IOException {
+        final NoOutputs outputs = NoOutputs.getSingleton();
+        final Object nothing = outputs.getNoOutput();
+        final Builder<Object> b = new Builder<Object>(FST.INPUT_TYPE.BYTE1, outputs);
+
+        int line = 0;
+        final BytesRef term = new BytesRef();
+        while (line < lines.length) {
+          String w = lines[line++];
+          if (w == null) {
+            break;
+          }
+          term.copy(w);
+          b.add(term, nothing);
+        }
+        
+        return b.finish();
+      }
+      
+      void generate(ArrayList<String> out, StringBuilder b, char from, char to,
+          int depth) {
+        if (depth == 0 || from == to) {
+          String seq = b.toString() + "_" + out.size() + "_end";
+          out.add(seq);
+        } else {
+          for (char c = from; c <= to; c++) {
+            b.append(c);
+            generate(out, b, from, c == to ? to : from, depth - 1);
+            b.deleteCharAt(b.length() - 1);
+          }
+        }
+      }
+
+      public int verifyStateAndBelow(FST<Object> fst, Arc<Object> arc, int depth) 
+        throws IOException {
+        if (fst.targetHasArcs(arc)) {
+          int childCount = 0;
+          for (arc = fst.readFirstTargetArc(arc, arc);; 
+               arc = fst.readNextArc(arc), childCount++)
+          {
+            boolean expanded = fst.isExpandedTarget(arc);
+            int children = verifyStateAndBelow(fst, new FST.Arc<Object>().copyFrom(arc), depth + 1);
+
+            assertEquals(
+                expanded,
+                (depth <= FST.FIXED_ARRAY_SHALLOW_DISTANCE && 
+                    children >= FST.FIXED_ARRAY_NUM_ARCS_SHALLOW) ||
+                 children >= FST.FIXED_ARRAY_NUM_ARCS_DEEP);
+            if (arc.isLast()) break;
+          }
+
+          return childCount;
+        }
+        return 0;
+      }
+    }
+
+    // Sanity check.
+    assertTrue(FST.FIXED_ARRAY_NUM_ARCS_SHALLOW < FST.FIXED_ARRAY_NUM_ARCS_DEEP);
+    assertTrue(FST.FIXED_ARRAY_SHALLOW_DISTANCE >= 0);
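+    // As asserted in verifyStateAndBelow: a target state is array-expanded
+    // iff it lies within FIXED_ARRAY_SHALLOW_DISTANCE of the root and has at
+    // least FIXED_ARRAY_NUM_ARCS_SHALLOW arcs, or has at least
+    // FIXED_ARRAY_NUM_ARCS_DEEP arcs regardless of depth.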
+
+    SyntheticData s = new SyntheticData();
+
+    ArrayList<String> out = new ArrayList<String>();
+    StringBuilder b = new StringBuilder();
+    s.generate(out, b, 'a', 'i', 10);
+    String[] input = out.toArray(new String[out.size()]);
+    Arrays.sort(input);
+    FST<Object> fst = s.compile(input);
+    FST.Arc<Object> arc = fst.getFirstArc(new FST.Arc<Object>());
+    s.verifyStateAndBelow(fst, arc, 1);
+  }
+
+  // Make sure a raw FST can differentiate between final and
+  // non-final end nodes
+  public void testNonFinalStopNodes() throws Exception {
+    final PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton(true);
+    final Long nothing = outputs.getNoOutput();
+    final Builder<Long> b = new Builder<Long>(FST.INPUT_TYPE.BYTE1, outputs);
+
+    final FST<Long> fst = new FST<Long>(FST.INPUT_TYPE.BYTE1, outputs);
+
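+    // Construct the FST by hand with addNode() rather than Builder.add(),
+    // so that both a final and a non-final stop node can be wired up
+    // explicitly.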
+    final Builder.UnCompiledNode<Long> rootNode = new Builder.UnCompiledNode<Long>(b, 0);
+
+    // Add final stop node
+    {
+      final Builder.UnCompiledNode<Long> node = new Builder.UnCompiledNode<Long>(b, 0);
+      node.isFinal = true;
+      rootNode.addArc('a', node);
+      final Builder.CompiledNode frozen = new Builder.CompiledNode();
+      frozen.address = fst.addNode(node);
+      rootNode.arcs[0].nextFinalOutput = outputs.get(17);
+      rootNode.arcs[0].isFinal = true;
+      rootNode.arcs[0].output = nothing;
+      rootNode.arcs[0].target = frozen;
+    }
+
+    // Add non-final stop node
+    {
+      final Builder.UnCompiledNode<Long> node = new Builder.UnCompiledNode<Long>(b, 0);
+      rootNode.addArc('b', node);
+      final Builder.CompiledNode frozen = new Builder.CompiledNode();
+      frozen.address = fst.addNode(node);
+      rootNode.arcs[1].nextFinalOutput = nothing;
+      rootNode.arcs[1].output = outputs.get(42);
+      rootNode.arcs[1].target = frozen;
+    }
+
+    fst.finish(fst.addNode(rootNode));
+    
+    checkStopNodes(fst, outputs);
+
+    // Make sure it still works after save/load:
+    Directory dir = newDirectory();
+    IndexOutput out = dir.createOutput("fst");
+    fst.save(out);
+    out.close();
+
+    IndexInput in = dir.openInput("fst");
+    final FST<Long> fst2 = new FST<Long>(in, outputs);
+    checkStopNodes(fst2, outputs);
+    in.close();
+    dir.close();
+  }
+
+  private void checkStopNodes(FST<Long> fst, PositiveIntOutputs outputs) throws Exception {
+    final Long nothing = outputs.getNoOutput();
+    FST.Arc<Long> startArc = fst.getFirstArc(new FST.Arc<Long>());
+    assertEquals(nothing, startArc.output);
+    assertEquals(nothing, startArc.nextFinalOutput);
+
+    FST.Arc<Long> arc = fst.readFirstTargetArc(startArc, new FST.Arc<Long>());
+    assertEquals('a', arc.label);
+    assertEquals(17, arc.nextFinalOutput.longValue());
+    assertTrue(arc.isFinal());
+
+    arc = fst.readNextArc(arc);
+    assertEquals('b', arc.label);
+    assertFalse(arc.isFinal());
+    assertEquals(42, arc.output.longValue());
+  }
+}
diff --git a/lucene/backwards/src/test/org/apache/lucene/util/makeEuroparlLineFile.py b/lucene/backwards/src/test/org/apache/lucene/util/makeEuroparlLineFile.py
new file mode 100644
index 0000000..2cfda33
--- /dev/null
+++ b/lucene/backwards/src/test/org/apache/lucene/util/makeEuroparlLineFile.py
@@ -0,0 +1,137 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+import glob
+import datetime
+import tarfile
+import re
+
+try:
+  sys.argv.remove('-verbose')
+  VERBOSE = True
+except ValueError:
+  VERBOSE = False
+
+try:
+  sys.argv.remove('-docPerParagraph')
+  docPerParagraph = True
+except ValueError:
+  docPerParagraph = False
+
+reChapterOnly = re.compile('^<CHAPTER ID=.*?>$')
+reTagOnly = re.compile('^<.*?>$')
+reNumberOnly = re.compile(r'^\d+\.?$')
+
+docCount = 0
+didEnglish = False
+
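+# Each flushed doc becomes one tab-separated line: title<TAB>date<TAB>body.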
+def write(date, title, pending, fOut):
+  global docCount
+  body = ' '.join(pending).replace('\t', ' ').strip()
+  if len(body) > 0:
+    line = '%s\t%s\t%s\n' % (title, date, body)
+    fOut.write(line)
+    docCount += 1
+    del pending[:]
+    if VERBOSE:
+      print len(body)
+
+def processTar(fileName, fOut):
+
+  global didEnglish
+
+  t = tarfile.open(fileName, 'r:gz')
+  for ti in t:
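+    # English shows up in every language-pair archive; once the first
+    # archive has been processed (didEnglish is set), '/en/' members are
+    # skipped so English documents are only emitted once.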
+    if ti.isfile() and (not didEnglish or ti.name.find('/en/') == -1):
+
+      tup = ti.name.split('/')
+      lang = tup[1]
+      year = int(tup[2][3:5])
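+      # The file name encodes a two-digit year: 00-19 => 2000s, else 1900s.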
+      if year < 20:
+        year += 2000
+      else:
+        year += 1900
+
+      month = int(tup[2][6:8])
+      day = int(tup[2][9:11])
+      date = datetime.date(year=year, month=month, day=day)
+
+      if VERBOSE:
+        print
+        print '%s: %s' % (ti.name, date)
+      nextIsTitle = False
+      title = None
+      pending = []
+      for line in t.extractfile(ti).readlines():
+        line = line.strip()
+        if reChapterOnly.match(line) is not None:
+          if title is not None:
+            write(date, title, pending, fOut)
+          nextIsTitle = True
+          continue
+        if nextIsTitle:
+          if not reNumberOnly.match(line) and not reTagOnly.match(line):
+            title = line
+            nextIsTitle = False
+            if VERBOSE:
+              print '  title %s' % line
+          continue
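+        # Paragraph boundary: either flush the pending doc (one doc per
+        # paragraph) or record a PARSEP marker inside the current doc.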
+        if line.lower() == '<p>':
+          if docPerParagraph:
+            write(date, title, pending, fOut)
+          else:
+            pending.append('PARSEP')
+        elif not reTagOnly.match(line):
+          pending.append(line)
+      if title is not None and len(pending) > 0:
+        write(date, title, pending, fOut)
+
+  didEnglish = True
+  
+# '/x/lucene/data/europarl/all.lines.txt'
+dirIn = sys.argv[1]
+fileOut = sys.argv[2]
+  
+fOut = open(fileOut, 'wb')
+
+for fileName in glob.glob('%s/??-??.tgz' % dirIn):
+  if fileName.endswith('.tgz'):
+    print 'process %s; %d docs so far...' % (fileName, docCount)
+    processTar(fileName, fOut)
+
+print 'TOTAL: %s' % docCount
+
+# Run it with something like this:
+"""
+
+# Europarl V5 makes 76,917 docs, avg 38.6 KB per doc
+python -u makeEuroparlLineFile.py /x/lucene/data/europarl /x/lucene/data/europarl/tmp.lines.txt
+shuf /x/lucene/data/europarl/tmp.lines.txt > /x/lucene/data/europarl/full.lines.txt
+rm /x/lucene/data/europarl/tmp.lines.txt
+
+# Run again, this time each paragraph is a doc:
+# Europarl V5 makes 5,607,746 paragraphs (one paragraph per line), avg 620 bytes per paragraph:
+python -u makeEuroparlLineFile.py /x/lucene/data/europarl /x/lucene/data/europarl/tmp.lines.txt -docPerParagraph
+shuf /x/lucene/data/europarl/tmp.lines.txt > /x/lucene/data/europarl/para.lines.txt
+rm /x/lucene/data/europarl/tmp.lines.txt
+
+# ~5.5 MB gzip'd:
+head -200 /x/lucene/data/europarl/full.lines.txt > tmp.txt
+head -10000 /x/lucene/data/europarl/para.lines.txt >> tmp.txt
+shuf tmp.txt > europarl.subset.txt
+rm -f tmp.txt
+gzip --best europarl.subset.txt
+"""