Merging with trunk.

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/branches/solr5914@1585024 13f79535-47bb-0310-9956-ffa450edef68
diff --git a/dev-tools/idea/.idea/libraries/JUnit.xml b/dev-tools/idea/.idea/libraries/JUnit.xml
index 58d305b..4f3e812 100644
--- a/dev-tools/idea/.idea/libraries/JUnit.xml
+++ b/dev-tools/idea/.idea/libraries/JUnit.xml
@@ -2,7 +2,7 @@
   <library name="JUnit">
     <CLASSES>
       <root url="jar://$PROJECT_DIR$/lucene/test-framework/lib/junit-4.10.jar!/" />
-      <root url="jar://$PROJECT_DIR$/lucene/test-framework/lib/randomizedtesting-runner-2.1.1.jar!/" />
+      <root url="jar://$PROJECT_DIR$/lucene/test-framework/lib/randomizedtesting-runner-2.1.3.jar!/" />
     </CLASSES>
     <JAVADOC />
     <SOURCES />
diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index 64b6c0d..0d029df 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -50,6 +50,9 @@
   via setReader.  
   (Benson Margulies via Robert Muir - pull request #16)
 
+* LUCENE-5527: The Collector API has been refactored to use a dedicated Collector
+  per leaf. (Shikhar Bhushan, Adrien Grand)
+
 Documentation
 
 * LUCENE-5392: Add/improve analysis package documentation to reflect
@@ -187,6 +190,8 @@
 
 * LUCENE-5543: Remove/deprecate Directory.fileExists (Mike McCandless)
 
+* LUCENE-5565: Refactor SpatialPrefixTree/Cell to not use Strings. (David Smiley)
+
 Optimizations
 
 * LUCENE-5468: HunspellStemFilter uses 10 to 100x less RAM. It also loads
@@ -231,6 +236,16 @@
 
 * LUCENE-5568: Benchmark module's "default.codec" option didn't work. (David Smiley)
 
+* LUCENE-5574: Closing a near-real-time reader no longer attempts to
+  delete unreferenced files if the original writer has been closed;
+  this could cause index corruption in certain cases where index files
+  were directly changed (deleted, overwritten, etc.) in the index
+  directory outside of Lucene.  (Simon Willnauer, Shai Erera, Robert
+  Muir, Mike McCandless)
+
+* LUCENE-5570: Don't let FSDirectory.sync() create new zero-byte files, instead throw
+  exception if a file is missing.  (Uwe Schindler, Mike McCandless, Robert Muir)
+
 Test Framework
 
 * LUCENE-5577: Temporary folder and file management (and cleanup facilities)
diff --git a/lucene/MIGRATE.txt b/lucene/MIGRATE.txt
index 6e26ad6..7cbc53f 100644
--- a/lucene/MIGRATE.txt
+++ b/lucene/MIGRATE.txt
@@ -12,3 +12,10 @@
 The constructor of Tokenizer no longer takes Reader, as this was a leftover
 from before it was reusable. See the org.apache.lucene.analysis package
 documentation for more details.
+
+## Refactored Collector API (LUCENE-5299)
+
+The Collector API has been refactored to use a different Collector instance
+per segment. It is possible to migrate existing collectors painlessly by
+extending SimpleCollector instead of Collector: SimpleCollector is a
+specialization of Collector that returns itself as a per-segment Collector.
diff --git a/lucene/core/src/java/org/apache/lucene/index/IndexFileDeleter.java b/lucene/core/src/java/org/apache/lucene/index/IndexFileDeleter.java
index a590a51..611d4f7 100644
--- a/lucene/core/src/java/org/apache/lucene/index/IndexFileDeleter.java
+++ b/lucene/core/src/java/org/apache/lucene/index/IndexFileDeleter.java
@@ -29,6 +29,7 @@
 import java.util.Map;
 import java.util.regex.Matcher;
 
+import org.apache.lucene.store.AlreadyClosedException;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.NoSuchDirectoryException;
 import org.apache.lucene.util.CollectionUtil;
@@ -262,6 +263,14 @@
     deleteCommits();
   }
 
+  private void ensureOpen() throws AlreadyClosedException {
+    if (writer == null) {
+      throw new AlreadyClosedException("this IndexWriter is closed");
+    } else {
+      writer.ensureOpen(false);
+    }
+  }
+
   public SegmentInfos getLastSegmentInfos() {
     return lastSegmentInfos;
   }
@@ -578,6 +587,7 @@
   void deleteFile(String fileName)
        throws IOException {
     assert locked();
+    ensureOpen();
     try {
       if (infoStream.isEnabled("IFD")) {
         infoStream.message("IFD", "delete \"" + fileName + "\"");
diff --git a/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java b/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java
index 0c6ecb0..5702aaf 100644
--- a/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java
@@ -4566,8 +4566,7 @@
     deleter.revisitPolicy();
   }
 
-  // Called by DirectoryReader.doClose
-  synchronized void deletePendingFiles() throws IOException {
+  private synchronized void deletePendingFiles() throws IOException {
     deleter.deletePendingFiles();
   }
   
@@ -4665,10 +4664,12 @@
   }
   
   synchronized void incRefDeleter(SegmentInfos segmentInfos) throws IOException {
+    ensureOpen();
     deleter.incRef(segmentInfos, false);
   }
   
   synchronized void decRefDeleter(SegmentInfos segmentInfos) throws IOException {
+    ensureOpen();
     deleter.decRef(segmentInfos);
   }
   
diff --git a/lucene/core/src/java/org/apache/lucene/index/StandardDirectoryReader.java b/lucene/core/src/java/org/apache/lucene/index/StandardDirectoryReader.java
index fbe4376..4e4d8cd 100644
--- a/lucene/core/src/java/org/apache/lucene/index/StandardDirectoryReader.java
+++ b/lucene/core/src/java/org/apache/lucene/index/StandardDirectoryReader.java
@@ -25,6 +25,7 @@
 import java.util.List;
 import java.util.Map;
 
+import org.apache.lucene.store.AlreadyClosedException;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;
 import org.apache.lucene.util.IOUtils;
@@ -365,11 +366,15 @@
     }
 
     if (writer != null) {
-      writer.decRefDeleter(segmentInfos);
-      
-      // Since we just closed, writer may now be able to
-      // delete unused files:
-      writer.deletePendingFiles();
+      try {
+        writer.decRefDeleter(segmentInfos);
+      } catch (AlreadyClosedException ex) {
+        // This is OK, it just means our original writer was
+        // closed before we were, and this may leave some
+        // un-referenced files in the index, which is
+        // harmless.  The next time IW is opened on the
+        // index, it will delete them.
+      }
     }
 
     // throw the first exception
diff --git a/lucene/core/src/java/org/apache/lucene/search/BooleanScorer.java b/lucene/core/src/java/org/apache/lucene/search/BooleanScorer.java
index 7c2b6aa..173bb44 100644
--- a/lucene/core/src/java/org/apache/lucene/search/BooleanScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/BooleanScorer.java
@@ -61,7 +61,7 @@
 
 final class BooleanScorer extends BulkScorer {
   
-  private static final class BooleanScorerCollector extends Collector {
+  private static final class BooleanScorerCollector extends SimpleCollector {
     private BucketTable bucketTable;
     private int mask;
     private Scorer scorer;
@@ -93,11 +93,6 @@
     }
     
     @Override
-    public void setNextReader(AtomicReaderContext context) {
-      // not needed by this implementation
-    }
-    
-    @Override
     public void setScorer(Scorer scorer) {
       this.scorer = scorer;
     }
@@ -136,7 +131,7 @@
       }
     }
 
-    public Collector newCollector(int mask) {
+    public LeafCollector newCollector(int mask) {
       return new BooleanScorerCollector(mask, this);
     }
 
@@ -148,12 +143,12 @@
     // TODO: re-enable this if BQ ever sends us required clauses
     //public boolean required = false;
     public boolean prohibited;
-    public Collector collector;
+    public LeafCollector collector;
     public SubScorer next;
     public boolean more;
 
     public SubScorer(BulkScorer scorer, boolean required, boolean prohibited,
-        Collector collector, SubScorer next) {
+        LeafCollector collector, SubScorer next) {
       if (required) {
         throw new IllegalArgumentException("this scorer cannot handle required=true");
       }
@@ -200,7 +195,7 @@
   }
 
   @Override
-  public boolean score(Collector collector, int max) throws IOException {
+  public boolean score(LeafCollector collector, int max) throws IOException {
 
     boolean more;
     Bucket tmp;
diff --git a/lucene/core/src/java/org/apache/lucene/search/BulkScorer.java b/lucene/core/src/java/org/apache/lucene/search/BulkScorer.java
index 2331cae..7ba1b39 100644
--- a/lucene/core/src/java/org/apache/lucene/search/BulkScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/BulkScorer.java
@@ -31,7 +31,7 @@
   /** Scores and collects all matching documents.
    * @param collector The collector to which all matching documents are passed.
    */
-  public void score(Collector collector) throws IOException {
+  public void score(LeafCollector collector) throws IOException {
     score(collector, Integer.MAX_VALUE);
   }
 
@@ -42,5 +42,5 @@
    * @param max Score up to, but not including, this doc
    * @return true if more matching documents may remain.
    */
-  public abstract boolean score(Collector collector, int max) throws IOException;
+  public abstract boolean score(LeafCollector collector, int max) throws IOException;
 }
diff --git a/lucene/core/src/java/org/apache/lucene/search/CachingCollector.java b/lucene/core/src/java/org/apache/lucene/search/CachingCollector.java
index 23e1590..c5957d8 100644
--- a/lucene/core/src/java/org/apache/lucene/search/CachingCollector.java
+++ b/lucene/core/src/java/org/apache/lucene/search/CachingCollector.java
@@ -18,10 +18,12 @@
  */
 
 import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.util.ArrayUtil;
 import org.apache.lucene.util.RamUsageEstimator;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.List;
 
 /**
@@ -38,317 +40,280 @@
  * scoring is cached) per collected document.  If the result
  * set is large this can easily be a very substantial amount
  * of RAM!
- * 
- * <p><b>NOTE</b>: this class caches at least 128 documents
- * before checking RAM limits.
- * 
+ *
  * <p>See the Lucene <tt>modules/grouping</tt> module for more
  * details including a full code example.</p>
  *
  * @lucene.experimental
  */
-public abstract class CachingCollector extends Collector {
-  
-  // Max out at 512K arrays
-  private static final int MAX_ARRAY_SIZE = 512 * 1024;
+public abstract class CachingCollector extends FilterCollector {
+
   private static final int INITIAL_ARRAY_SIZE = 128;
-  private final static int[] EMPTY_INT_ARRAY = new int[0];
 
-  private static class SegStart {
-    public final AtomicReaderContext readerContext;
-    public final int end;
-
-    public SegStart(AtomicReaderContext readerContext, int end) {
-      this.readerContext = readerContext;
-      this.end = end;
-    }
-  }
-  
   private static final class CachedScorer extends Scorer {
-    
+
     // NOTE: these members are package-private b/c that way accessing them from
     // the outer class does not incur access check by the JVM. The same
     // situation would be if they were defined in the outer class as private
     // members.
     int doc;
     float score;
-    
+
     private CachedScorer() { super(null); }
 
     @Override
     public final float score() { return score; }
-    
+
     @Override
     public final int advance(int target) { throw new UnsupportedOperationException(); }
-    
+
     @Override
     public final int docID() { return doc; }
-    
+
     @Override
     public final int freq() { throw new UnsupportedOperationException(); }
-    
+
     @Override
     public final int nextDoc() { throw new UnsupportedOperationException(); }
-    
+
     @Override
     public long cost() { return 1; }
+  }
+
+  private static class NoScoreCachingCollector extends CachingCollector {
+
+    List<Boolean> acceptDocsOutOfOrders;
+    List<AtomicReaderContext> contexts;
+    List<int[]> docs;
+    int maxDocsToCache;
+    NoScoreCachingLeafCollector lastCollector;
+
+    NoScoreCachingCollector(Collector in, int maxDocsToCache) {
+      super(in);
+      this.maxDocsToCache = maxDocsToCache;
+      contexts = new ArrayList<>();
+      acceptDocsOutOfOrders = new ArrayList<>();
+      docs = new ArrayList<>();
     }
 
-  // A CachingCollector which caches scores
-  private static final class ScoreCachingCollector extends CachingCollector {
-
-    private final CachedScorer cachedScorer;
-    private final List<float[]> cachedScores;
-
-    private Scorer scorer;
-    private float[] curScores;
-
-    ScoreCachingCollector(Collector other, double maxRAMMB) {
-      super(other, maxRAMMB, true);
-
-      cachedScorer = new CachedScorer();
-      cachedScores = new ArrayList<>();
-      curScores = new float[INITIAL_ARRAY_SIZE];
-      cachedScores.add(curScores);
+    protected NoScoreCachingLeafCollector wrap(LeafCollector in, int maxDocsToCache) {
+      return new NoScoreCachingLeafCollector(in, maxDocsToCache);
     }
 
-    ScoreCachingCollector(Collector other, int maxDocsToCache) {
-      super(other, maxDocsToCache);
-
-      cachedScorer = new CachedScorer();
-      cachedScores = new ArrayList<>();
-      curScores = new float[INITIAL_ARRAY_SIZE];
-      cachedScores.add(curScores);
+    public LeafCollector getLeafCollector(AtomicReaderContext context) throws IOException {
+      postCollection();
+      final LeafCollector in = this.in.getLeafCollector(context);
+      if (contexts != null) {
+        contexts.add(context);
+        acceptDocsOutOfOrders.add(in.acceptsDocsOutOfOrder());
+      }
+      if (maxDocsToCache >= 0) {
+        return lastCollector = wrap(in, maxDocsToCache);
+      } else {
+        return in;
+      }
     }
-    
+
+    protected void invalidate() {
+      maxDocsToCache = -1;
+      contexts = null;
+      this.docs = null;
+    }
+
+    protected void postCollect(NoScoreCachingLeafCollector collector) {
+      final int[] docs = collector.cachedDocs();
+      maxDocsToCache -= docs.length;
+      this.docs.add(docs);
+    }
+
+    private void postCollection() {
+      if (lastCollector != null) {
+        if (!lastCollector.hasCache()) {
+          invalidate();
+        } else {
+          postCollect(lastCollector);
+        }
+        lastCollector = null;
+      }
+    }
+
+    protected void collect(LeafCollector collector, int i) throws IOException {
+      final int[] docs = this.docs.get(i);
+      for (int doc : docs) {
+        collector.collect(doc);
+      }
+    }
+
+    public void replay(Collector other) throws IOException {
+      postCollection();
+      if (!isCached()) {
+        throw new IllegalStateException("cannot replay: cache was cleared because too much RAM was required");
+      }
+      assert docs.size() == contexts.size();
+      for (int i = 0; i < contexts.size(); ++i) {
+        final AtomicReaderContext context = contexts.get(i);
+        final boolean docsInOrder = !acceptDocsOutOfOrders.get(i);
+        final LeafCollector collector = other.getLeafCollector(context);
+        if (!collector.acceptsDocsOutOfOrder() && !docsInOrder) {
+          throw new IllegalArgumentException(
+                "cannot replay: given collector does not support "
+                    + "out-of-order collection, while the wrapped collector does. "
+                    + "Therefore cached documents may be out-of-order.");
+        }
+        collect(collector, i);
+      }
+    }
+
+  }
+
+  private static class ScoreCachingCollector extends NoScoreCachingCollector {
+
+    List<float[]> scores;
+
+    ScoreCachingCollector(Collector in, int maxDocsToCache) {
+      super(in, maxDocsToCache);
+      scores = new ArrayList<>();
+    }
+
+    protected NoScoreCachingLeafCollector wrap(LeafCollector in, int maxDocsToCache) {
+      return new ScoreCachingLeafCollector(in, maxDocsToCache);
+    }
+
+    @Override
+    protected void postCollect(NoScoreCachingLeafCollector collector) {
+      final ScoreCachingLeafCollector coll = (ScoreCachingLeafCollector) collector;
+      super.postCollect(coll);
+      scores.add(coll.cachedScores());
+    }
+
+    protected void collect(LeafCollector collector, int i) throws IOException {
+      final int[] docs = this.docs.get(i);
+      final float[] scores = this.scores.get(i);
+      assert docs.length == scores.length;
+      final CachedScorer scorer = new CachedScorer();
+      collector.setScorer(scorer);
+      for (int j = 0; j < docs.length; ++j) {
+        scorer.doc = docs[j];
+        scorer.score = scores[j];
+        collector.collect(scorer.doc);
+      }
+    }
+
+  }
+
+  private class NoScoreCachingLeafCollector extends FilterLeafCollector {
+
+    final int maxDocsToCache;
+    int[] docs;
+    int docCount;
+
+    NoScoreCachingLeafCollector(LeafCollector in, int maxDocsToCache) {
+      super(in);
+      this.maxDocsToCache = maxDocsToCache;
+      docs = new int[Math.min(maxDocsToCache, INITIAL_ARRAY_SIZE)];
+      docCount = 0;
+    }
+
+    protected void grow(int newLen) {
+      docs = Arrays.copyOf(docs, newLen);
+    }
+
+    protected void invalidate() {
+      docs = null;
+      docCount = -1;
+      cached = false;
+    }
+
+    protected void buffer(int doc) throws IOException {
+      docs[docCount] = doc;
+    }
+
     @Override
     public void collect(int doc) throws IOException {
-
-      if (curDocs == null) {
-        // Cache was too large
-        cachedScorer.score = scorer.score();
-        cachedScorer.doc = doc;
-        other.collect(doc);
-        return;
-      }
-
-      // Allocate a bigger array or abort caching
-      if (upto == curDocs.length) {
-        base += upto;
-        
-        // Compute next array length - don't allocate too big arrays
-        int nextLength = 8*curDocs.length;
-        if (nextLength > MAX_ARRAY_SIZE) {
-          nextLength = MAX_ARRAY_SIZE;
-        }
-
-        if (base + nextLength > maxDocsToCache) {
-          // try to allocate a smaller array
-          nextLength = maxDocsToCache - base;
-          if (nextLength <= 0) {
-            // Too many docs to collect -- clear cache
-            curDocs = null;
-            curScores = null;
-            cachedSegs.clear();
-            cachedDocs.clear();
-            cachedScores.clear();
-            cachedScorer.score = scorer.score();
-            cachedScorer.doc = doc;
-            other.collect(doc);
-            return;
+      if (docs != null) {
+        if (docCount >= docs.length) {
+          if (docCount >= maxDocsToCache) {
+            invalidate();
+          } else {
+            final int newLen = Math.min(ArrayUtil.oversize(docCount + 1, RamUsageEstimator.NUM_BYTES_INT), maxDocsToCache);
+            grow(newLen);
           }
         }
-        
-        curDocs = new int[nextLength];
-        cachedDocs.add(curDocs);
-        curScores = new float[nextLength];
-        cachedScores.add(curScores);
-        upto = 0;
+        if (docs != null) {
+          buffer(doc);
+          ++docCount;
+        }
       }
-      
-      curDocs[upto] = doc;
-      cachedScorer.score = curScores[upto] = scorer.score();
-      upto++;
-      cachedScorer.doc = doc;
-      other.collect(doc);
+      super.collect(doc);
     }
 
-    @Override
-    public void replay(Collector other) throws IOException {
-      replayInit(other);
-      
-      int curUpto = 0;
-      int curBase = 0;
-      int chunkUpto = 0;
-      curDocs = EMPTY_INT_ARRAY;
-      for (SegStart seg : cachedSegs) {
-        other.setNextReader(seg.readerContext);
-        other.setScorer(cachedScorer);
-        while (curBase + curUpto < seg.end) {
-          if (curUpto == curDocs.length) {
-            curBase += curDocs.length;
-            curDocs = cachedDocs.get(chunkUpto);
-            curScores = cachedScores.get(chunkUpto);
-            chunkUpto++;
-            curUpto = 0;
-          }
-          cachedScorer.score = curScores[curUpto];
-          cachedScorer.doc = curDocs[curUpto];
-          other.collect(curDocs[curUpto++]);
-        }
-      }
+    boolean hasCache() {
+      return docs != null;
+    }
+
+    int[] cachedDocs() {
+      return docs == null ? null : Arrays.copyOf(docs, docCount);
+    }
+
+  }
+
+  private class ScoreCachingLeafCollector extends NoScoreCachingLeafCollector {
+
+    Scorer scorer;
+    float[] scores;
+
+    ScoreCachingLeafCollector(LeafCollector in, int maxDocsToCache) {
+      super(in, maxDocsToCache);
+      scores = new float[docs.length];
     }
 
     @Override
     public void setScorer(Scorer scorer) throws IOException {
       this.scorer = scorer;
-      other.setScorer(cachedScorer);
+      super.setScorer(scorer);
     }
 
     @Override
-    public String toString() {
-      if (isCached()) {
-        return "CachingCollector (" + (base+upto) + " docs & scores cached)";
-      } else {
-        return "CachingCollector (cache was cleared)";
-      }
+    protected void grow(int newLen) {
+      super.grow(newLen);
+      scores = Arrays.copyOf(scores, newLen);
     }
 
+    @Override
+    protected void invalidate() {
+      super.invalidate();
+      scores = null;
+    }
+
+    @Override
+    protected void buffer(int doc) throws IOException {
+      super.buffer(doc);
+      scores[docCount] = scorer.score();
+    }
+
+    float[] cachedScores() {
+      return docs == null ? null : Arrays.copyOf(scores, docCount);
+    }
   }
 
-  // A CachingCollector which does not cache scores
-  private static final class NoScoreCachingCollector extends CachingCollector {
-    
-    NoScoreCachingCollector(Collector other, double maxRAMMB) {
-     super(other, maxRAMMB, false);
-    }
-
-    NoScoreCachingCollector(Collector other, int maxDocsToCache) {
-     super(other, maxDocsToCache);
-    }
-
-    @Override
-    public void collect(int doc) throws IOException {
-
-      if (curDocs == null) {
-        // Cache was too large
-        other.collect(doc);
-        return;
-      }
-
-      // Allocate a bigger array or abort caching
-      if (upto == curDocs.length) {
-        base += upto;
-        
-        // Compute next array length - don't allocate too big arrays
-        int nextLength = 8*curDocs.length;
-        if (nextLength > MAX_ARRAY_SIZE) {
-          nextLength = MAX_ARRAY_SIZE;
-        }
-
-        if (base + nextLength > maxDocsToCache) {
-          // try to allocate a smaller array
-          nextLength = maxDocsToCache - base;
-          if (nextLength <= 0) {
-            // Too many docs to collect -- clear cache
-            curDocs = null;
-            cachedSegs.clear();
-            cachedDocs.clear();
-            other.collect(doc);
-            return;
-          }
-        }
-        
-        curDocs = new int[nextLength];
-        cachedDocs.add(curDocs);
-        upto = 0;
-      }
-      
-      curDocs[upto] = doc;
-      upto++;
-      other.collect(doc);
-    }
-
-    @Override
-    public void replay(Collector other) throws IOException {
-      replayInit(other);
-      
-      int curUpto = 0;
-      int curbase = 0;
-      int chunkUpto = 0;
-      curDocs = EMPTY_INT_ARRAY;
-      for (SegStart seg : cachedSegs) {
-        other.setNextReader(seg.readerContext);
-        while (curbase + curUpto < seg.end) {
-          if (curUpto == curDocs.length) {
-            curbase += curDocs.length;
-            curDocs = cachedDocs.get(chunkUpto);
-            chunkUpto++;
-            curUpto = 0;
-          }
-          other.collect(curDocs[curUpto++]);
-        }
-      }
-    }
-
-    @Override
-    public void setScorer(Scorer scorer) throws IOException {
-      other.setScorer(scorer);
-    }
-
-    @Override
-    public String toString() {
-      if (isCached()) {
-        return "CachingCollector (" + (base+upto) + " docs cached)";
-      } else {
-        return "CachingCollector (cache was cleared)";
-      }
-    }
-
-  }
-
-  // TODO: would be nice if a collector defined a
-  // needsScores() method so we can specialize / do checks
-  // up front. This is only relevant for the ScoreCaching
-  // version -- if the wrapped Collector does not need
-  // scores, it can avoid cachedScorer entirely.
-  protected final Collector other;
-  
-  protected final int maxDocsToCache;
-  protected final List<SegStart> cachedSegs = new ArrayList<>();
-  protected final List<int[]> cachedDocs;
-  
-  private AtomicReaderContext lastReaderContext;
-  
-  protected int[] curDocs;
-  protected int upto;
-  protected int base;
-  protected int lastDocBase;
-  
   /**
    * Creates a {@link CachingCollector} which does not wrap another collector.
    * The cached documents and scores can later be {@link #replay(Collector)
    * replayed}.
-   * 
+   *
    * @param acceptDocsOutOfOrder
    *          whether documents are allowed to be collected out-of-order
    */
   public static CachingCollector create(final boolean acceptDocsOutOfOrder, boolean cacheScores, double maxRAMMB) {
-    Collector other = new Collector() {
+    Collector other = new SimpleCollector() {
       @Override
       public boolean acceptsDocsOutOfOrder() {
         return acceptDocsOutOfOrder;
       }
-      
-      @Override
-      public void setScorer(Scorer scorer) {}
 
       @Override
       public void collect(int doc) {}
 
-      @Override
-      public void setNextReader(AtomicReaderContext context) {}
-
     };
     return create(other, cacheScores, maxRAMMB);
   }
@@ -356,7 +321,7 @@
   /**
    * Create a new {@link CachingCollector} that wraps the given collector and
    * caches documents and scores up to the specified RAM threshold.
-   * 
+   *
    * @param other
    *          the Collector to wrap and delegate calls to.
    * @param cacheScores
@@ -368,7 +333,12 @@
    *          scores are cached.
    */
   public static CachingCollector create(Collector other, boolean cacheScores, double maxRAMMB) {
-    return cacheScores ? new ScoreCachingCollector(other, maxRAMMB) : new NoScoreCachingCollector(other, maxRAMMB);
+    int bytesPerDoc = RamUsageEstimator.NUM_BYTES_INT;
+    if (cacheScores) {
+      bytesPerDoc += RamUsageEstimator.NUM_BYTES_FLOAT;
+    }
+    final int maxDocsToCache = (int) ((maxRAMMB * 1024 * 1024) / bytesPerDoc);
+    return create(other, cacheScores, maxDocsToCache);
   }
 
   /**
@@ -388,74 +358,26 @@
   public static CachingCollector create(Collector other, boolean cacheScores, int maxDocsToCache) {
     return cacheScores ? new ScoreCachingCollector(other, maxDocsToCache) : new NoScoreCachingCollector(other, maxDocsToCache);
   }
-  
-  // Prevent extension from non-internal classes
-  private CachingCollector(Collector other, double maxRAMMB, boolean cacheScores) {
-    this.other = other;
-    
-    cachedDocs = new ArrayList<>();
-    curDocs = new int[INITIAL_ARRAY_SIZE];
-    cachedDocs.add(curDocs);
 
-    int bytesPerDoc = RamUsageEstimator.NUM_BYTES_INT;
-    if (cacheScores) {
-      bytesPerDoc += RamUsageEstimator.NUM_BYTES_FLOAT;
-    }
-    maxDocsToCache = (int) ((maxRAMMB * 1024 * 1024) / bytesPerDoc);
+  private boolean cached;
+
+  private CachingCollector(Collector in) {
+    super(in);
+    cached = true;
   }
 
-  private CachingCollector(Collector other, int maxDocsToCache) {
-    this.other = other;
-
-    cachedDocs = new ArrayList<>();
-    curDocs = new int[INITIAL_ARRAY_SIZE];
-    cachedDocs.add(curDocs);
-    this.maxDocsToCache = maxDocsToCache;
-  }
-  
-  @Override
-  public boolean acceptsDocsOutOfOrder() {
-    return other.acceptsDocsOutOfOrder();
-  }
-
-  public boolean isCached() {
-    return curDocs != null;
-  }
-
-  @Override  
-  public void setNextReader(AtomicReaderContext context) throws IOException {
-    other.setNextReader(context);
-    if (lastReaderContext != null) {
-      cachedSegs.add(new SegStart(lastReaderContext, base+upto));
-    }
-    lastReaderContext = context;
-  }
-
-  /** Reused by the specialized inner classes. */
-  void replayInit(Collector other) {
-    if (!isCached()) {
-      throw new IllegalStateException("cannot replay: cache was cleared because too much RAM was required");
-    }
-    
-    if (!other.acceptsDocsOutOfOrder() && this.other.acceptsDocsOutOfOrder()) {
-      throw new IllegalArgumentException(
-          "cannot replay: given collector does not support "
-              + "out-of-order collection, while the wrapped collector does. "
-              + "Therefore cached documents may be out-of-order.");
-    }
-    
-    //System.out.println("CC: replay totHits=" + (upto + base));
-    if (lastReaderContext != null) {
-      cachedSegs.add(new SegStart(lastReaderContext, base+upto));
-      lastReaderContext = null;
-    }
+  /**
+   * Returns true if this collector is able to replay collection.
+   */
+  public final boolean isCached() {
+    return cached;
   }
 
   /**
    * Replays the cached doc IDs (and scores) to the given Collector. If this
    * instance does not cache scores, then Scorer is not set on
    * {@code other.setScorer} as well as scores are not replayed.
-   * 
+   *
    * @throws IllegalStateException
    *           if this collector is not cached (i.e., if the RAM limits were too
    *           low for the number of documents + scores to cache).
@@ -464,5 +386,5 @@
    *           while the collector passed to the ctor does.
    */
   public abstract void replay(Collector other) throws IOException;
-  
+
 }
diff --git a/lucene/core/src/java/org/apache/lucene/search/CollectionTerminatedException.java b/lucene/core/src/java/org/apache/lucene/search/CollectionTerminatedException.java
index 9caadfa..a4c426a 100644
--- a/lucene/core/src/java/org/apache/lucene/search/CollectionTerminatedException.java
+++ b/lucene/core/src/java/org/apache/lucene/search/CollectionTerminatedException.java
@@ -17,7 +17,7 @@
  * limitations under the License.
  */
 
-/** Throw this exception in {@link Collector#collect(int)} to prematurely
+/** Throw this exception in {@link LeafCollector#collect(int)} to prematurely
  *  terminate collection of the current leaf.
  *  <p>Note: IndexSearcher swallows this exception and never re-throws it.
  *  As a consequence, you should not catch it when calling
diff --git a/lucene/core/src/java/org/apache/lucene/search/Collector.java b/lucene/core/src/java/org/apache/lucene/search/Collector.java
index 312f507..bb47394 100644
--- a/lucene/core/src/java/org/apache/lucene/search/Collector.java
+++ b/lucene/core/src/java/org/apache/lucene/search/Collector.java
@@ -20,20 +20,19 @@
 import java.io.IOException;
 
 import org.apache.lucene.index.AtomicReaderContext;
-import org.apache.lucene.index.IndexReaderContext;
 
 /**
  * <p>Expert: Collectors are primarily meant to be used to
  * gather raw results from a search, and implement sorting
  * or custom result filtering, collation, etc. </p>
  *
- * <p>Lucene's core collectors are derived from Collector.
- * Likely your application can use one of these classes, or
- * subclass {@link TopDocsCollector}, instead of
- * implementing Collector directly:
+ * <p>Lucene's core collectors are derived from {@link Collector}
+ * and {@link SimpleCollector}. Likely your application can
+ * use one of these classes, or subclass {@link TopDocsCollector},
+ * instead of implementing Collector directly:
  *
  * <ul>
- *      
+ *
  *   <li>{@link TopDocsCollector} is an abstract base class
  *   that assumes you will retrieve the top N docs,
  *   according to some criteria, after collection is
@@ -62,118 +61,16 @@
  *
  * </ul>
  *
- * <p>Collector decouples the score from the collected doc:
- * the score computation is skipped entirely if it's not
- * needed.  Collectors that do need the score should
- * implement the {@link #setScorer} method, to hold onto the
- * passed {@link Scorer} instance, and call {@link
- * Scorer#score()} within the collect method to compute the
- * current hit's score.  If your collector may request the
- * score for a single hit multiple times, you should use
- * {@link ScoreCachingWrappingScorer}. </p>
- * 
- * <p><b>NOTE:</b> The doc that is passed to the collect
- * method is relative to the current reader. If your
- * collector needs to resolve this to the docID space of the
- * Multi*Reader, you must re-base it by recording the
- * docBase from the most recent setNextReader call.  Here's
- * a simple example showing how to collect docIDs into a
- * BitSet:</p>
- * 
- * <pre class="prettyprint">
- * IndexSearcher searcher = new IndexSearcher(indexReader);
- * final BitSet bits = new BitSet(indexReader.maxDoc());
- * searcher.search(query, new Collector() {
- *   private int docBase;
- * 
- *   <em>// ignore scorer</em>
- *   public void setScorer(Scorer scorer) {
- *   }
- *
- *   <em>// accept docs out of order (for a BitSet it doesn't matter)</em>
- *   public boolean acceptsDocsOutOfOrder() {
- *     return true;
- *   }
- * 
- *   public void collect(int doc) {
- *     bits.set(doc + docBase);
- *   }
- * 
- *   public void setNextReader(AtomicReaderContext context) {
- *     this.docBase = context.docBase;
- *   }
- * });
- * </pre>
- *
- * <p>Not all collectors will need to rebase the docID.  For
- * example, a collector that simply counts the total number
- * of hits would skip it.</p>
- * 
- * <p><b>NOTE:</b> Prior to 2.9, Lucene silently filtered
- * out hits with score <= 0.  As of 2.9, the core Collectors
- * no longer do that.  It's very unusual to have such hits
- * (a negative query boost, or function query returning
- * negative custom scores, could cause it to happen).  If
- * you need that behavior, use {@link
- * PositiveScoresOnlyCollector}.</p>
- *
  * @lucene.experimental
- * 
- * @since 2.9
  */
-public abstract class Collector {
-  
-  /**
-   * Called before successive calls to {@link #collect(int)}. Implementations
-   * that need the score of the current document (passed-in to
-   * {@link #collect(int)}), should save the passed-in Scorer and call
-   * scorer.score() when needed.
-   */
-  public abstract void setScorer(Scorer scorer) throws IOException;
-  
-  /**
-   * Called once for every document matching a query, with the unbased document
-   * number.
-   * <p>Note: The collection of the current segment can be terminated by throwing
-   * a {@link CollectionTerminatedException}. In this case, the last docs of the
-   * current {@link AtomicReaderContext} will be skipped and {@link IndexSearcher}
-   * will swallow the exception and continue collection with the next leaf.
-   * <p>
-   * Note: This is called in an inner search loop. For good search performance,
-   * implementations of this method should not call {@link IndexSearcher#doc(int)} or
-   * {@link org.apache.lucene.index.IndexReader#document(int)} on every hit.
-   * Doing so can slow searches by an order of magnitude or more.
-   */
-  public abstract void collect(int doc) throws IOException;
+public interface Collector {
 
   /**
-   * Called before collecting from each {@link AtomicReaderContext}. All doc ids in
-   * {@link #collect(int)} will correspond to {@link IndexReaderContext#reader}.
-   * 
-   * Add {@link AtomicReaderContext#docBase} to the current  {@link IndexReaderContext#reader}'s
-   * internal document id to re-base ids in {@link #collect(int)}.
-   * 
+   * Create a new {@link LeafCollector collector} to collect the given context.
+   *
    * @param context
    *          next atomic reader context
    */
-  public abstract void setNextReader(AtomicReaderContext context) throws IOException;
+  LeafCollector getLeafCollector(AtomicReaderContext context) throws IOException;
 
-  /**
-   * Return <code>true</code> if this collector does not
-   * require the matching docIDs to be delivered in int sort
-   * order (smallest to largest) to {@link #collect}.
-   *
-   * <p> Most Lucene Query implementations will visit
-   * matching docIDs in order.  However, some queries
-   * (currently limited to certain cases of {@link
-   * BooleanQuery}) can achieve faster searching if the
-   * <code>Collector</code> allows them to deliver the
-   * docIDs out of order.</p>
-   *
-   * <p> Many collectors don't mind getting docIDs out of
-   * order, so it's important to return <code>true</code>
-   * here.
-   */
-  public abstract boolean acceptsDocsOutOfOrder();
-  
 }
diff --git a/lucene/core/src/java/org/apache/lucene/search/ConstantScoreQuery.java b/lucene/core/src/java/org/apache/lucene/search/ConstantScoreQuery.java
index a917a0c..2b7f4ed 100644
--- a/lucene/core/src/java/org/apache/lucene/search/ConstantScoreQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/ConstantScoreQuery.java
@@ -212,31 +212,16 @@
     }
 
     @Override
-    public boolean score(Collector collector, int max) throws IOException {
+    public boolean score(LeafCollector collector, int max) throws IOException {
       return bulkScorer.score(wrapCollector(collector), max);
     }
 
-    private Collector wrapCollector(final Collector collector) {
-      return new Collector() {
+    private LeafCollector wrapCollector(LeafCollector collector) {
+      return new FilterLeafCollector(collector) {
         @Override
         public void setScorer(Scorer scorer) throws IOException {
           // we must wrap again here, but using the scorer passed in as parameter:
-          collector.setScorer(new ConstantScorer(scorer, weight, theScore));
-        }
-        
-        @Override
-        public void collect(int doc) throws IOException {
-          collector.collect(doc);
-        }
-        
-        @Override
-        public void setNextReader(AtomicReaderContext context) throws IOException {
-          collector.setNextReader(context);
-        }
-        
-        @Override
-        public boolean acceptsDocsOutOfOrder() {
-          return collector.acceptsDocsOutOfOrder();
+          in.setScorer(new ConstantScorer(scorer, weight, theScore));
         }
       };
     }
diff --git a/lucene/core/src/java/org/apache/lucene/search/FakeScorer.java b/lucene/core/src/java/org/apache/lucene/search/FakeScorer.java
index 89b92a5..e2a50c8 100644
--- a/lucene/core/src/java/org/apache/lucene/search/FakeScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/FakeScorer.java
@@ -20,7 +20,7 @@
 import java.util.Collection;
 
 /** Used by {@link BulkScorer}s that need to pass a {@link
- *  Scorer} to {@link Collector#setScorer}. */
+ *  Scorer} to {@link LeafCollector#setScorer}. */
 final class FakeScorer extends Scorer {
   float score;
   int doc = -1;
diff --git a/lucene/core/src/java/org/apache/lucene/search/FilterCollector.java b/lucene/core/src/java/org/apache/lucene/search/FilterCollector.java
new file mode 100644
index 0000000..247bb03
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/search/FilterCollector.java
@@ -0,0 +1,48 @@
+package org.apache.lucene.search;
+
+import java.io.IOException;
+
+import org.apache.lucene.index.AtomicReaderContext;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * {@link Collector} delegator.
+ *
+ * @lucene.experimental
+ */
+public class FilterCollector implements Collector {
+
+  protected final Collector in;
+
+  /** Sole constructor. */
+  public FilterCollector(Collector in) {
+    this.in = in;
+  }
+
+  @Override
+  public LeafCollector getLeafCollector(AtomicReaderContext context) throws IOException {
+    return in.getLeafCollector(context);
+  }
+
+  @Override
+  public String toString() {
+    return getClass().getSimpleName() + "(" + in + ")";
+  }
+  
+}
diff --git a/lucene/core/src/java/org/apache/lucene/search/FilterLeafCollector.java b/lucene/core/src/java/org/apache/lucene/search/FilterLeafCollector.java
new file mode 100644
index 0000000..e3ae9a8
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/search/FilterLeafCollector.java
@@ -0,0 +1,56 @@
+package org.apache.lucene.search;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+/**
+ * {@link LeafCollector} delegator.
+ *
+ * @lucene.experimental
+ */
+public class FilterLeafCollector implements LeafCollector {
+
+  protected final LeafCollector in;
+
+  /** Sole constructor. */
+  public FilterLeafCollector(LeafCollector in) {
+    this.in = in;
+  }
+
+  @Override
+  public void setScorer(Scorer scorer) throws IOException {
+    in.setScorer(scorer);
+  }
+
+  @Override
+  public void collect(int doc) throws IOException {
+    in.collect(doc);
+  }
+
+  @Override
+  public boolean acceptsDocsOutOfOrder() {
+    return in.acceptsDocsOutOfOrder();
+  }
+
+  @Override
+  public String toString() {
+    return getClass().getSimpleName() + "(" + in + ")";
+  }
+
+}
diff --git a/lucene/core/src/java/org/apache/lucene/search/FilteredQuery.java b/lucene/core/src/java/org/apache/lucene/search/FilteredQuery.java
index f4ef594..d700a30 100644
--- a/lucene/core/src/java/org/apache/lucene/search/FilteredQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/FilteredQuery.java
@@ -225,7 +225,7 @@
     }
 
     @Override
-    public boolean score(Collector collector, int maxDoc) throws IOException {
+    public boolean score(LeafCollector collector, int maxDoc) throws IOException {
       // the normalization trick already applies the boost of this query,
       // so we can use the wrapped scorer directly:
       collector.setScorer(scorer);
diff --git a/lucene/core/src/java/org/apache/lucene/search/IndexSearcher.java b/lucene/core/src/java/org/apache/lucene/search/IndexSearcher.java
index 8b33ae7..8f1a5f6 100644
--- a/lucene/core/src/java/org/apache/lucene/search/IndexSearcher.java
+++ b/lucene/core/src/java/org/apache/lucene/search/IndexSearcher.java
@@ -275,7 +275,7 @@
 
   /** Lower-level search API.
    *
-   * <p>{@link Collector#collect(int)} is called for every matching
+   * <p>{@link LeafCollector#collect(int)} is called for every matching
    * document.
    *
    * @param query to match documents
@@ -291,7 +291,7 @@
 
   /** Lower-level search API.
    *
-   * <p>{@link Collector#collect(int)} is called for every matching document.
+   * <p>{@link LeafCollector#collect(int)} is called for every matching document.
    *
    * @throws BooleanQuery.TooManyClauses If a query would exceed 
    *         {@link BooleanQuery#getMaxClauseCount()} clauses.
@@ -578,7 +578,7 @@
    * Lower-level search API.
    * 
    * <p>
-   * {@link Collector#collect(int)} is called for every document. <br>
+   * {@link LeafCollector#collect(int)} is called for every document. <br>
    * 
    * <p>
    * NOTE: this method executes the searches on all given leaves exclusively.
@@ -600,17 +600,18 @@
     // threaded...?  the Collector could be sync'd?
     // always use single thread:
     for (AtomicReaderContext ctx : leaves) { // search each subreader
+      final LeafCollector leafCollector;
       try {
-        collector.setNextReader(ctx);
+        leafCollector = collector.getLeafCollector(ctx);
       } catch (CollectionTerminatedException e) {
         // there is no doc of interest in this reader context
         // continue with the following leaf
         continue;
       }
-      BulkScorer scorer = weight.bulkScorer(ctx, !collector.acceptsDocsOutOfOrder(), ctx.reader().getLiveDocs());
+      BulkScorer scorer = weight.bulkScorer(ctx, !leafCollector.acceptsDocsOutOfOrder(), ctx.reader().getLiveDocs());
       if (scorer != null) {
         try {
-          scorer.score(collector);
+          scorer.score(leafCollector);
         } catch (CollectionTerminatedException e) {
           // collection was terminated prematurely
           // continue with the following leaf
@@ -779,12 +780,12 @@
       try {
         final AtomicReaderContext ctx = slice.leaves[0];
         final int base = ctx.docBase;
-        hq.setNextReader(ctx);
-        hq.setScorer(fakeScorer);
+        final LeafCollector collector = hq.getLeafCollector(ctx);
+        collector.setScorer(fakeScorer);
         for(ScoreDoc scoreDoc : docs.scoreDocs) {
           fakeScorer.doc = scoreDoc.doc - base;
           fakeScorer.score = scoreDoc.score;
-          hq.collect(scoreDoc.doc-base);
+          collector.collect(scoreDoc.doc-base);
         }
 
         // Carry over maxScore from sub:
diff --git a/lucene/core/src/java/org/apache/lucene/search/LeafCollector.java b/lucene/core/src/java/org/apache/lucene/search/LeafCollector.java
new file mode 100644
index 0000000..562e76d
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/search/LeafCollector.java
@@ -0,0 +1,121 @@
+package org.apache.lucene.search;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.index.AtomicReaderContext;
+
+/**
+ * <p>Collector decouples the score from the collected doc:
+ * the score computation is skipped entirely if it's not
+ * needed.  Collectors that do need the score should
+ * implement the {@link #setScorer} method, to hold onto the
+ * passed {@link Scorer} instance, and call {@link
+ * Scorer#score()} within the collect method to compute the
+ * current hit's score.  If your collector may request the
+ * score for a single hit multiple times, you should use
+ * {@link ScoreCachingWrappingScorer}. </p>
+ * 
+ * <p><b>NOTE:</b> The doc that is passed to the collect
+ * method is relative to the current reader. If your
+ * collector needs to resolve this to the docID space of the
+ * Multi*Reader, you must re-base it by recording the
+ * docBase of the {@link AtomicReaderContext} passed to getLeafCollector.  Here's
+ * a simple example showing how to collect docIDs into a
+ * BitSet:</p>
+ * 
+ * <pre class="prettyprint">
+ * IndexSearcher searcher = new IndexSearcher(indexReader);
+ * final BitSet bits = new BitSet(indexReader.maxDoc());
+ * searcher.search(query, new Collector() {
+ *
+ *   public LeafCollector getLeafCollector(AtomicReaderContext context)
+ *       throws IOException {
+ *     final int docBase = context.docBase;
+ *     return new LeafCollector() {
+ *
+ *       <em>// ignore scorer</em>
+ *       public void setScorer(Scorer scorer) throws IOException {
+ *       }
+ *
+ *       public void collect(int doc) throws IOException {
+ *         bits.set(docBase + doc);
+ *       }
+ *
+ *       // accept docs out of order (for a BitSet it doesn't matter)
+ *       public boolean acceptsDocsOutOfOrder() {
+ *         return true;
+ *       }
+ *          
+ *     };
+ *   }
+ *      
+ * });
+ * </pre>
+ *
+ * <p>Not all collectors will need to rebase the docID.  For
+ * example, a collector that simply counts the total number
+ * of hits would skip it.</p>
+ *
+ * @lucene.experimental
+ */
+public interface LeafCollector {
+
+  /**
+   * Called before successive calls to {@link #collect(int)}. Implementations
+   * that need the score of the current document (passed-in to
+   * {@link #collect(int)}), should save the passed-in Scorer and call
+   * scorer.score() when needed.
+   */
+  void setScorer(Scorer scorer) throws IOException;
+  
+  /**
+   * Called once for every document matching a query, with the unbased document
+   * number.
+   * <p>Note: The collection of the current segment can be terminated by throwing
+   * a {@link CollectionTerminatedException}. In this case, the last docs of the
+   * current {@link AtomicReaderContext} will be skipped and {@link IndexSearcher}
+   * will swallow the exception and continue collection with the next leaf.
+   * <p>
+   * Note: This is called in an inner search loop. For good search performance,
+   * implementations of this method should not call {@link IndexSearcher#doc(int)} or
+   * {@link org.apache.lucene.index.IndexReader#document(int)} on every hit.
+   * Doing so can slow searches by an order of magnitude or more.
+   */
+  void collect(int doc) throws IOException;
+
+  /**
+   * Return <code>true</code> if this collector does not
+   * require the matching docIDs to be delivered in int sort
+   * order (smallest to largest) to {@link #collect}.
+   *
+   * <p> Most Lucene Query implementations will visit
+   * matching docIDs in order.  However, some queries
+   * (currently limited to certain cases of {@link
+   * BooleanQuery}) can achieve faster searching if the
+   * <code>Collector</code> allows them to deliver the
+   * docIDs out of order.</p>
+   *
+   * <p> Many collectors don't mind getting docIDs out of
+   * order, so it's important to return <code>true</code>
+   * here.
+   */
+  boolean acceptsDocsOutOfOrder();
+
+}
diff --git a/lucene/core/src/java/org/apache/lucene/search/MultiCollector.java b/lucene/core/src/java/org/apache/lucene/search/MultiCollector.java
index 40c0838..859b893 100644
--- a/lucene/core/src/java/org/apache/lucene/search/MultiCollector.java
+++ b/lucene/core/src/java/org/apache/lucene/search/MultiCollector.java
@@ -18,6 +18,7 @@
  */
 
 import java.io.IOException;
+import java.util.Arrays;
 
 import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.search.Collector;
@@ -29,7 +30,12 @@
  * list of collectors and wraps them with {@link MultiCollector}, while
  * filtering out the <code>null</code> null ones.
  */
-public class MultiCollector extends Collector {
+public class MultiCollector implements Collector {
+
+  /** See {@link #wrap(Iterable)}. */
+  public static Collector wrap(Collector... collectors) {
+    return wrap(Arrays.asList(collectors));
+  }
 
   /**
    * Wraps a list of {@link Collector}s with a {@link MultiCollector}. This
@@ -47,7 +53,7 @@
    *           if either 0 collectors were input, or all collectors are
    *           <code>null</code>.
    */
-  public static Collector wrap(Collector... collectors) {
+  public static Collector wrap(Iterable<? extends Collector> collectors) {
     // For the user's convenience, we allow null collectors to be passed.
     // However, to improve performance, these null collectors are found
     // and dropped from the array we save for actual collection time.
@@ -70,8 +76,6 @@
         }
       }
       return col;
-    } else if (n == collectors.length) {
-      return new MultiCollector(collectors);
     } else {
       Collector[] colls = new Collector[n];
       n = 0;
@@ -91,34 +95,47 @@
   }
 
   @Override
-  public boolean acceptsDocsOutOfOrder() {
-    for (Collector c : collectors) {
-      if (!c.acceptsDocsOutOfOrder()) {
-        return false;
+  public LeafCollector getLeafCollector(AtomicReaderContext context) throws IOException {
+    final LeafCollector[] leafCollectors = new LeafCollector[collectors.length];
+    for (int i = 0; i < collectors.length; ++i) {
+      leafCollectors[i] = collectors[i].getLeafCollector(context);
+    }
+    return new MultiLeafCollector(leafCollectors);
+  }
+
+
+  private static class MultiLeafCollector implements LeafCollector {
+
+    private final LeafCollector[] collectors;
+
+    private MultiLeafCollector(LeafCollector[] collectors) {
+      this.collectors = collectors;
+    }
+
+    @Override
+    public void setScorer(Scorer scorer) throws IOException {
+      for (LeafCollector c : collectors) {
+        c.setScorer(scorer);
       }
     }
-    return true;
-  }
 
-  @Override
-  public void collect(int doc) throws IOException {
-    for (Collector c : collectors) {
-      c.collect(doc);
+    @Override
+    public void collect(int doc) throws IOException {
+      for (LeafCollector c : collectors) {
+        c.collect(doc);
+      }
     }
-  }
 
-  @Override
-  public void setNextReader(AtomicReaderContext context) throws IOException {
-    for (Collector c : collectors) {
-      c.setNextReader(context);
+    @Override
+    public boolean acceptsDocsOutOfOrder() {
+      for (LeafCollector c : collectors) {
+        if (!c.acceptsDocsOutOfOrder()) {
+          return false;
+        }
+      }
+      return true;
     }
-  }
 
-  @Override
-  public void setScorer(Scorer s) throws IOException {
-    for (Collector c : collectors) {
-      c.setScorer(s);
-    }
   }
 
 }
diff --git a/lucene/core/src/java/org/apache/lucene/search/PositiveScoresOnlyCollector.java b/lucene/core/src/java/org/apache/lucene/search/PositiveScoresOnlyCollector.java
index d35a755..ba22295 100644
--- a/lucene/core/src/java/org/apache/lucene/search/PositiveScoresOnlyCollector.java
+++ b/lucene/core/src/java/org/apache/lucene/search/PositiveScoresOnlyCollector.java
@@ -26,38 +26,33 @@
  * {@link Collector} and makes sure only documents with
  * scores &gt; 0 are collected.
  */
-public class PositiveScoresOnlyCollector extends Collector {
+public class PositiveScoresOnlyCollector extends FilterCollector {
 
-  final private Collector c;
-  private Scorer scorer;
-  
-  public PositiveScoresOnlyCollector(Collector c) {
-    this.c = c;
-  }
-  
-  @Override
-  public void collect(int doc) throws IOException {
-    if (scorer.score() > 0) {
-      c.collect(doc);
-    }
+  public PositiveScoresOnlyCollector(Collector in) {
+    super(in);
   }
 
   @Override
-  public void setNextReader(AtomicReaderContext context) throws IOException {
-    c.setNextReader(context);
-  }
+  public LeafCollector getLeafCollector(AtomicReaderContext context)
+      throws IOException {
+    return new FilterLeafCollector(super.getLeafCollector(context)) {
 
-  @Override
-  public void setScorer(Scorer scorer) throws IOException {
-    // Set a ScoreCachingWrappingScorer in case the wrapped Collector will call
-    // score() also.
-    this.scorer = new ScoreCachingWrappingScorer(scorer);
-    c.setScorer(this.scorer);
-  }
+      private Scorer scorer;
 
-  @Override
-  public boolean acceptsDocsOutOfOrder() {
-    return c.acceptsDocsOutOfOrder();
+      @Override
+      public void setScorer(Scorer scorer) throws IOException {
+        this.scorer = new ScoreCachingWrappingScorer(scorer);
+        in.setScorer(this.scorer);
+      }
+
+      @Override
+      public void collect(int doc) throws IOException {
+        if (scorer.score() > 0) {
+          in.collect(doc);
+        }
+      }
+      
+    };
   }
 
 }
diff --git a/lucene/core/src/java/org/apache/lucene/search/ScoreCachingWrappingScorer.java b/lucene/core/src/java/org/apache/lucene/search/ScoreCachingWrappingScorer.java
index 471dc20..844290c 100644
--- a/lucene/core/src/java/org/apache/lucene/search/ScoreCachingWrappingScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/ScoreCachingWrappingScorer.java
@@ -37,7 +37,7 @@
   private final Scorer scorer;
   private int curDoc = -1;
   private float curScore;
-  
+
   /** Creates a new instance by wrapping the given scorer. */
   public ScoreCachingWrappingScorer(Scorer scorer) {
     super(scorer.weight);
@@ -51,7 +51,7 @@
       curScore = scorer.score();
       curDoc = doc;
     }
-    
+
     return curScore;
   }
 
diff --git a/lucene/core/src/java/org/apache/lucene/search/Scorer.java b/lucene/core/src/java/org/apache/lucene/search/Scorer.java
index abcbb61..929d3b9 100644
--- a/lucene/core/src/java/org/apache/lucene/search/Scorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/Scorer.java
@@ -57,7 +57,7 @@
   /** Returns the score of the current document matching the query.
    * Initially invalid, until {@link #nextDoc()} or {@link #advance(int)}
    * is called the first time, or when called from within
-   * {@link Collector#collect}.
+   * {@link LeafCollector#collect}.
    */
   public abstract float score() throws IOException;
   
diff --git a/lucene/core/src/java/org/apache/lucene/search/SimpleCollector.java b/lucene/core/src/java/org/apache/lucene/search/SimpleCollector.java
new file mode 100644
index 0000000..5803b2e
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/search/SimpleCollector.java
@@ -0,0 +1,53 @@
+package org.apache.lucene.search;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.index.AtomicReaderContext;
+
+/**
+ * Base {@link Collector} implementation that is used to collect all contexts.
+ *
+ * @lucene.experimental
+ */
+public abstract class SimpleCollector implements Collector, LeafCollector {
+
+  @Override
+  public final LeafCollector getLeafCollector(AtomicReaderContext context) throws IOException {
+    doSetNextReader(context);
+    return this;
+  }
+
+  /** This method is called before collecting <code>context</code>. */
+  protected void doSetNextReader(AtomicReaderContext context) throws IOException {}
+
+  @Override
+  public void setScorer(Scorer scorer) throws IOException {
+    // no-op by default
+  }
+
+  // redeclare methods so that javadocs are inherited on sub-classes
+
+  @Override
+  public abstract boolean acceptsDocsOutOfOrder();
+
+  @Override
+  public abstract void collect(int doc) throws IOException;
+
+}
diff --git a/lucene/core/src/java/org/apache/lucene/search/SortRescorer.java b/lucene/core/src/java/org/apache/lucene/search/SortRescorer.java
index 1bb2134..6f125e8 100644
--- a/lucene/core/src/java/org/apache/lucene/search/SortRescorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/SortRescorer.java
@@ -75,7 +75,7 @@
 
       if (readerContext != null) {
         // We advanced to another segment:
-        collector.setNextReader(readerContext);
+        collector.getLeafCollector(readerContext);
         collector.setScorer(fakeScorer);
         docBase = readerContext.docBase;
       }
diff --git a/lucene/core/src/java/org/apache/lucene/search/TimeLimitingCollector.java b/lucene/core/src/java/org/apache/lucene/search/TimeLimitingCollector.java
index 2d2eb0e..9a08a2b 100644
--- a/lucene/core/src/java/org/apache/lucene/search/TimeLimitingCollector.java
+++ b/lucene/core/src/java/org/apache/lucene/search/TimeLimitingCollector.java
@@ -29,7 +29,7 @@
  * exceeded, the search thread is stopped by throwing a
  * {@link TimeExceededException}.
  */
-public class TimeLimitingCollector extends Collector {
+public class TimeLimitingCollector implements Collector {
 
 
   /** Thrown when elapsed search time exceeds allowed search time. */
@@ -131,45 +131,30 @@
     this.greedy = greedy;
   }
   
-  /**
-   * Calls {@link Collector#collect(int)} on the decorated {@link Collector}
-   * unless the allowed time has passed, in which case it throws an exception.
-   * 
-   * @throws TimeExceededException
-   *           if the time allowed has exceeded.
-   */
   @Override
-  public void collect(final int doc) throws IOException {
-    final long time = clock.get();
-    if (timeout < time) {
-      if (greedy) {
-        //System.out.println(this+"  greedy: before failing, collecting doc: "+(docBase + doc)+"  "+(time-t0));
-        collector.collect(doc);
-      }
-      //System.out.println(this+"  failing on:  "+(docBase + doc)+"  "+(time-t0));
-      throw new TimeExceededException( timeout-t0, time-t0, docBase + doc );
-    }
-    //System.out.println(this+"  collecting: "+(docBase + doc)+"  "+(time-t0));
-    collector.collect(doc);
-  }
-  
-  @Override
-  public void setNextReader(AtomicReaderContext context) throws IOException {
-    collector.setNextReader(context);
+  public LeafCollector getLeafCollector(AtomicReaderContext context) throws IOException {
     this.docBase = context.docBase;
     if (Long.MIN_VALUE == t0) {
       setBaseline();
     }
-  }
-  
-  @Override
-  public void setScorer(Scorer scorer) throws IOException {
-    collector.setScorer(scorer);
-  }
-
-  @Override
-  public boolean acceptsDocsOutOfOrder() {
-    return collector.acceptsDocsOutOfOrder();
+    return new FilterLeafCollector(collector.getLeafCollector(context)) {
+      
+      @Override
+      public void collect(int doc) throws IOException {
+        final long time = clock.get();
+        if (timeout < time) {
+          if (greedy) {
+            //System.out.println(this+"  greedy: before failing, collecting doc: "+(docBase + doc)+"  "+(time-t0));
+            in.collect(doc);
+          }
+          //System.out.println(this+"  failing on:  "+(docBase + doc)+"  "+(time-t0));
+          throw new TimeExceededException( timeout-t0, time-t0, docBase + doc );
+        }
+        //System.out.println(this+"  collecting: "+(docBase + doc)+"  "+(time-t0));
+        in.collect(doc);
+      }
+      
+    };
   }
   
   /**
diff --git a/lucene/core/src/java/org/apache/lucene/search/TopDocsCollector.java b/lucene/core/src/java/org/apache/lucene/search/TopDocsCollector.java
index 5e7dd50..bd0687e 100644
--- a/lucene/core/src/java/org/apache/lucene/search/TopDocsCollector.java
+++ b/lucene/core/src/java/org/apache/lucene/search/TopDocsCollector.java
@@ -31,7 +31,7 @@
  * however, you might want to consider overriding all methods, in order to avoid
  * a NullPointerException.
  */
-public abstract class TopDocsCollector<T extends ScoreDoc> extends Collector {
+public abstract class TopDocsCollector<T extends ScoreDoc> extends SimpleCollector {
 
   /** This is used in case topDocs() is called with illegal parameters, or there
    *  simply aren't (enough) results. */
diff --git a/lucene/core/src/java/org/apache/lucene/search/TopFieldCollector.java b/lucene/core/src/java/org/apache/lucene/search/TopFieldCollector.java
index 3f1fa15..6f038c4 100644
--- a/lucene/core/src/java/org/apache/lucene/search/TopFieldCollector.java
+++ b/lucene/core/src/java/org/apache/lucene/search/TopFieldCollector.java
@@ -92,7 +92,7 @@
     }
     
     @Override
-    public void setNextReader(AtomicReaderContext context) throws IOException {
+    protected void doSetNextReader(AtomicReaderContext context) throws IOException {
       this.docBase = context.docBase;
       queue.setComparator(0, comparator.setNextReader(context));
       comparator = queue.firstComparator;
@@ -446,7 +446,7 @@
     }
 
     @Override
-    public void setNextReader(AtomicReaderContext context) throws IOException {
+    protected void doSetNextReader(AtomicReaderContext context) throws IOException {
       docBase = context.docBase;
       for (int i = 0; i < comparators.length; i++) {
         queue.setComparator(i, comparators[i].setNextReader(context));
@@ -1001,7 +1001,7 @@
     }
 
     @Override
-    public void setNextReader(AtomicReaderContext context) throws IOException {
+    protected void doSetNextReader(AtomicReaderContext context) throws IOException {
       docBase = context.docBase;
       afterDoc = after.doc - docBase;
       for (int i = 0; i < comparators.length; i++) {
diff --git a/lucene/core/src/java/org/apache/lucene/search/TopScoreDocCollector.java b/lucene/core/src/java/org/apache/lucene/search/TopScoreDocCollector.java
index 0674779..bfebeda 100644
--- a/lucene/core/src/java/org/apache/lucene/search/TopScoreDocCollector.java
+++ b/lucene/core/src/java/org/apache/lucene/search/TopScoreDocCollector.java
@@ -113,9 +113,9 @@
     }
 
     @Override
-    public void setNextReader(AtomicReaderContext context) {
-      super.setNextReader(context);
-      afterDoc = after.doc - docBase;
+    protected void doSetNextReader(AtomicReaderContext context) throws IOException {
+      super.doSetNextReader(context);
+      afterDoc = after.doc - context.docBase;
     }
 
     @Override
@@ -208,9 +208,9 @@
     }
     
     @Override
-    public void setNextReader(AtomicReaderContext context) {
-      super.setNextReader(context);
-      afterDoc = after.doc - docBase;
+    protected void doSetNextReader(AtomicReaderContext context) throws IOException {
+      super.doSetNextReader(context);
+      afterDoc = after.doc - context.docBase;
     }
     
     @Override
@@ -300,7 +300,7 @@
   }
   
   @Override
-  public void setNextReader(AtomicReaderContext context) {
+  protected void doSetNextReader(AtomicReaderContext context) throws IOException {
     docBase = context.docBase;
   }
   
diff --git a/lucene/core/src/java/org/apache/lucene/search/TotalHitCountCollector.java b/lucene/core/src/java/org/apache/lucene/search/TotalHitCountCollector.java
index 1704d8b..4fc5be6 100644
--- a/lucene/core/src/java/org/apache/lucene/search/TotalHitCountCollector.java
+++ b/lucene/core/src/java/org/apache/lucene/search/TotalHitCountCollector.java
@@ -17,13 +17,12 @@
  * limitations under the License.
  */
 
-import org.apache.lucene.index.AtomicReaderContext;
 
 /**
  * Just counts the total number of hits.
  */
 
-public class TotalHitCountCollector extends Collector {
+public class TotalHitCountCollector extends SimpleCollector {
   private int totalHits;
 
   /** Returns how many hits matched the search. */
@@ -32,19 +31,11 @@
   }
 
   @Override
-  public void setScorer(Scorer scorer) {
-  }
-
-  @Override
   public void collect(int doc) {
     totalHits++;
   }
 
   @Override
-  public void setNextReader(AtomicReaderContext context) {
-  }
-
-  @Override
   public boolean acceptsDocsOutOfOrder() {
     return true;
   }
diff --git a/lucene/core/src/java/org/apache/lucene/search/Weight.java b/lucene/core/src/java/org/apache/lucene/search/Weight.java
index 696c7ab..0603cd8 100644
--- a/lucene/core/src/java/org/apache/lucene/search/Weight.java
+++ b/lucene/core/src/java/org/apache/lucene/search/Weight.java
@@ -150,7 +150,7 @@
     }
 
     @Override
-    public boolean score(Collector collector, int max) throws IOException {
+    public boolean score(LeafCollector collector, int max) throws IOException {
       // TODO: this may be sort of weird, when we are
       // embedded in a BooleanScorer, because we are
       // called for every chunk of 2048 documents.  But,
@@ -172,7 +172,7 @@
   /**
    * Returns true iff this implementation scores docs only out of order. This
    * method is used in conjunction with {@link Collector}'s
-   * {@link Collector#acceptsDocsOutOfOrder() acceptsDocsOutOfOrder} and
+   * {@link LeafCollector#acceptsDocsOutOfOrder() acceptsDocsOutOfOrder} and
    * {@link #bulkScorer(AtomicReaderContext, boolean, Bits)} to
    * create a matching {@link Scorer} instance for a given {@link Collector}, or
    * vice versa.
diff --git a/lucene/core/src/java/org/apache/lucene/search/package.html b/lucene/core/src/java/org/apache/lucene/search/package.html
index 51e199e..889501a 100644
--- a/lucene/core/src/java/org/apache/lucene/search/package.html
+++ b/lucene/core/src/java/org/apache/lucene/search/package.html
@@ -508,7 +508,7 @@
         abstract method:
         <ol>
             <li>
-                {@link org.apache.lucene.search.BulkScorer#score(org.apache.lucene.search.Collector,int) score(Collector,int)} &mdash;
+                {@link org.apache.lucene.search.BulkScorer#score(org.apache.lucene.search.LeafCollector,int) score(LeafCollector,int)} &mdash;
 		Score all documents up to but not including the specified max document.
 	    </li>
         </ol>
@@ -563,7 +563,7 @@
 <p>If a Filter is being used, some initial setup is done to determine which docs to include. 
    Otherwise, we ask the Weight for a {@link org.apache.lucene.search.Scorer Scorer} for each
    {@link org.apache.lucene.index.IndexReader IndexReader} segment and proceed by calling
-   {@link org.apache.lucene.search.BulkScorer#score(org.apache.lucene.search.Collector) BulkScorer.score(Collector)}.
+   {@link org.apache.lucene.search.BulkScorer#score(org.apache.lucene.search.LeafCollector) BulkScorer.score(LeafCollector)}.
 </p>
 <p>At last, we are actually going to score some documents. The score method takes in the Collector
    (most likely the TopScoreDocCollector or TopFieldCollector) and does its business.Of course, here 
diff --git a/lucene/core/src/java/org/apache/lucene/store/FSDirectory.java b/lucene/core/src/java/org/apache/lucene/store/FSDirectory.java
index 262330d..ff8fae9 100644
--- a/lucene/core/src/java/org/apache/lucene/store/FSDirectory.java
+++ b/lucene/core/src/java/org/apache/lucene/store/FSDirectory.java
@@ -22,6 +22,8 @@
 import java.io.FilenameFilter;
 import java.io.IOException;
 import java.io.RandomAccessFile;
+import java.nio.channels.FileChannel;
+import java.nio.file.StandardOpenOption;
 
 import java.util.Collection;
 import static java.util.Collections.synchronizedSet;
@@ -402,11 +404,11 @@
     IOException exc = null;
     while (!success && retryCount < 5) {
       retryCount++;
-      RandomAccessFile file = null;
+      FileChannel file = null;
       try {
         try {
-          file = new RandomAccessFile(fullFile, "rw");
-          file.getFD().sync();
+          file = FileChannel.open(fullFile.toPath(), StandardOpenOption.WRITE);
+          file.force(true); // TODO: we probably dont care about metadata, but this is what we did before...
           success = true;
         } finally {
           if (file != null)
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java
index 4268267..d6a7853 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java
@@ -67,6 +67,7 @@
 import org.apache.lucene.store.SingleInstanceLockFactory;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.Constants;
 import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.SetOnce;
@@ -2371,4 +2372,47 @@
     r.close();
     dir.close();
   }
+
+  // LUCENE-5574
+  public void testClosingNRTReaderDoesNotCorruptYourIndex() throws IOException {
+
+    // Windows disallows deleting & overwriting files still
+    // open for reading:
+    assumeFalse("this test can't run on Windows", Constants.WINDOWS);
+
+    MockDirectoryWrapper dir = newMockDirectory();
+
+    // Allow deletion of still open files:
+    dir.setNoDeleteOpenFile(false);
+
+    // Allow writing to same file more than once:
+    dir.setPreventDoubleWrite(false);
+
+    IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
+    LogMergePolicy lmp = new LogDocMergePolicy();
+    lmp.setMergeFactor(2);
+    iwc.setMergePolicy(lmp);
+
+    RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
+    Document doc = new Document();
+    doc.add(new TextField("a", "foo", Field.Store.NO));
+    w.addDocument(doc);
+    w.commit();
+    w.addDocument(doc);
+
+    // Get a new reader, but this also sets off a merge:
+    IndexReader r = w.getReader();
+    w.close();
+
+    // Blow away index and make a new writer:
+    for(String fileName : dir.listAll()) {
+      dir.deleteFile(fileName);
+    }
+
+    w = new RandomIndexWriter(random(), dir);
+    w.addDocument(doc);
+    w.close();
+    r.close();
+    dir.close();
+  }
 }
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestOmitTf.java b/lucene/core/src/test/org/apache/lucene/index/TestOmitTf.java
index 47abea2..04fad02 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestOmitTf.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestOmitTf.java
@@ -26,6 +26,7 @@
 import org.apache.lucene.document.FieldType;
 import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.FieldInfo.IndexOptions;
+import org.apache.lucene.search.LeafCollector;
 import org.apache.lucene.search.BooleanClause.Occur;
 import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.CollectionStatistics;
@@ -34,6 +35,7 @@
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.PhraseQuery;
 import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.SimpleCollector;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.search.TermStatistics;
 import org.apache.lucene.search.similarities.TFIDFSimilarity;
@@ -414,14 +416,12 @@
     dir.close();
   }
      
-  public static class CountingHitCollector extends Collector {
+  public static class CountingHitCollector extends SimpleCollector {
     static int count=0;
     static int sum=0;
     private int docBase = -1;
     CountingHitCollector(){count=0;sum=0;}
     @Override
-    public void setScorer(Scorer scorer) throws IOException {}
-    @Override
     public void collect(int doc) throws IOException {
       count++;
       sum += doc + docBase;  // use it to avoid any possibility of being merged away
@@ -431,7 +431,7 @@
     public static int getSum() { return sum; }
     
     @Override
-    public void setNextReader(AtomicReaderContext context) {
+    protected void doSetNextReader(AtomicReaderContext context) throws IOException {
       docBase = context.docBase;
     }
     @Override
diff --git a/lucene/core/src/test/org/apache/lucene/search/JustCompileSearch.java b/lucene/core/src/test/org/apache/lucene/search/JustCompileSearch.java
index ee47759..f09d992 100644
--- a/lucene/core/src/test/org/apache/lucene/search/JustCompileSearch.java
+++ b/lucene/core/src/test/org/apache/lucene/search/JustCompileSearch.java
@@ -17,6 +17,8 @@
  * limitations under the License.
  */
 
+import java.io.IOException;
+
 import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
@@ -37,7 +39,7 @@
 
   private static final String UNSUPPORTED_MSG = "unsupported: used for back-compat testing only !";
 
-  static final class JustCompileCollector extends Collector {
+  static final class JustCompileCollector extends SimpleCollector {
 
     @Override
     public void collect(int doc) {
@@ -45,7 +47,7 @@
     }
 
     @Override
-    public void setNextReader(AtomicReaderContext context) {
+    protected void doSetNextReader(AtomicReaderContext context) throws IOException {
       throw new UnsupportedOperationException(UNSUPPORTED_MSG);
     }
 
@@ -290,7 +292,7 @@
     }
 
     @Override
-    public void setNextReader(AtomicReaderContext context) {
+    protected void doSetNextReader(AtomicReaderContext context) throws IOException {
       throw new UnsupportedOperationException(UNSUPPORTED_MSG);
     }
 
diff --git a/lucene/core/src/test/org/apache/lucene/search/MultiCollectorTest.java b/lucene/core/src/test/org/apache/lucene/search/MultiCollectorTest.java
index 2a63f49..5a7df3c 100644
--- a/lucene/core/src/test/org/apache/lucene/search/MultiCollectorTest.java
+++ b/lucene/core/src/test/org/apache/lucene/search/MultiCollectorTest.java
@@ -27,7 +27,7 @@
 
 public class MultiCollectorTest extends LuceneTestCase {
 
-  private static class DummyCollector extends Collector {
+  private static class DummyCollector extends SimpleCollector {
 
     boolean acceptsDocsOutOfOrderCalled = false;
     boolean collectCalled = false;
@@ -46,7 +46,7 @@
     }
 
     @Override
-    public void setNextReader(AtomicReaderContext context) throws IOException {
+    protected void doSetNextReader(AtomicReaderContext context) throws IOException {
       setNextReaderCalled = true;
     }
 
@@ -71,10 +71,11 @@
     // doesn't, an NPE would be thrown.
     Collector c = MultiCollector.wrap(new DummyCollector(), null, new DummyCollector());
     assertTrue(c instanceof MultiCollector);
-    assertTrue(c.acceptsDocsOutOfOrder());
-    c.collect(1);
-    c.setNextReader(null);
-    c.setScorer(null);
+    final LeafCollector ac = c.getLeafCollector(null);
+    assertTrue(ac.acceptsDocsOutOfOrder());
+    ac.collect(1);
+    c.getLeafCollector(null);
+    c.getLeafCollector(null).setScorer(null);
   }
 
   @Test
@@ -93,10 +94,11 @@
     // doesn't, an NPE would be thrown.
     DummyCollector[] dcs = new DummyCollector[] { new DummyCollector(), new DummyCollector() };
     Collector c = MultiCollector.wrap(dcs);
-    assertTrue(c.acceptsDocsOutOfOrder());
-    c.collect(1);
-    c.setNextReader(null);
-    c.setScorer(null);
+    LeafCollector ac = c.getLeafCollector(null);
+    assertTrue(ac.acceptsDocsOutOfOrder());
+    ac.collect(1);
+    ac = c.getLeafCollector(null);
+    ac.setScorer(null);
 
     for (DummyCollector dc : dcs) {
       assertTrue(dc.acceptsDocsOutOfOrderCalled);
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestBooleanOr.java b/lucene/core/src/test/org/apache/lucene/search/TestBooleanOr.java
index b1ba0f1..137b4ce 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestBooleanOr.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestBooleanOr.java
@@ -187,10 +187,7 @@
 
     final FixedBitSet hits = new FixedBitSet(docCount);
     final AtomicInteger end = new AtomicInteger();
-    Collector c = new Collector() {
-        @Override
-        public void setNextReader(AtomicReaderContext sub) {
-        }
+    LeafCollector c = new SimpleCollector() {
 
         @Override
         public void collect(int doc) {
@@ -199,10 +196,6 @@
         }
 
         @Override
-        public void setScorer(Scorer scorer) {
-        }
-
-        @Override
         public boolean acceptsDocsOutOfOrder() {
           return true;
         }
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestBooleanQueryVisitSubscorers.java b/lucene/core/src/test/org/apache/lucene/search/TestBooleanQueryVisitSubscorers.java
index 175061a..5e61f69 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestBooleanQueryVisitSubscorers.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestBooleanQueryVisitSubscorers.java
@@ -121,46 +121,45 @@
     return collector.docCounts;
   }
   
-  static class MyCollector extends Collector {
-    
-    private TopDocsCollector<ScoreDoc> collector;
-    private int docBase;
+  static class MyCollector extends FilterCollector {
 
     public final Map<Integer,Integer> docCounts = new HashMap<>();
     private final Set<Scorer> tqsSet = new HashSet<>();
     
     MyCollector() {
-      collector = TopScoreDocCollector.create(10, true);
+      super(TopScoreDocCollector.create(10, true));
     }
 
-    @Override
-    public boolean acceptsDocsOutOfOrder() {
-      return false;
-    }
-
-    @Override
-    public void collect(int doc) throws IOException {
-      int freq = 0;
-      for(Scorer scorer : tqsSet) {
-        if (doc == scorer.docID()) {
-          freq += scorer.freq();
+    public LeafCollector getLeafCollector(AtomicReaderContext context)
+        throws IOException {
+      final int docBase = context.docBase;
+      return new FilterLeafCollector(super.getLeafCollector(context)) {
+        
+        @Override
+        public boolean acceptsDocsOutOfOrder() {
+          return false;
         }
-      }
-      docCounts.put(doc + docBase, freq);
-      collector.collect(doc);
-    }
-
-    @Override
-    public void setNextReader(AtomicReaderContext context) throws IOException {
-      this.docBase = context.docBase;
-      collector.setNextReader(context);
-    }
-
-    @Override
-    public void setScorer(Scorer scorer) throws IOException {
-      collector.setScorer(scorer);
-      tqsSet.clear();
-      fillLeaves(scorer, tqsSet);
+        
+        @Override
+        public void setScorer(Scorer scorer) throws IOException {
+          super.setScorer(scorer);
+          tqsSet.clear();
+          fillLeaves(scorer, tqsSet);
+        }
+        
+        @Override
+        public void collect(int doc) throws IOException {
+          int freq = 0;
+          for(Scorer scorer : tqsSet) {
+            if (doc == scorer.docID()) {
+              freq += scorer.freq();
+            }
+          }
+          docCounts.put(doc + docBase, freq);
+          super.collect(doc);
+        }
+        
+      };
     }
     
     private void fillLeaves(Scorer scorer, Set<Scorer> set) {
@@ -174,11 +173,12 @@
     }
     
     public TopDocs topDocs(){
-      return collector.topDocs();
+      return ((TopDocsCollector<?>) in).topDocs();
     }
     
     public int freq(int doc) throws IOException {
       return docCounts.get(doc);
     }
+    
   }
 }
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestBooleanScorer.java b/lucene/core/src/test/org/apache/lucene/search/TestBooleanScorer.java
index 68227c6..f1ed5bc 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestBooleanScorer.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestBooleanScorer.java
@@ -84,7 +84,7 @@
       private int doc = -1;
 
       @Override
-      public boolean score(Collector c, int maxDoc) throws IOException {
+      public boolean score(LeafCollector c, int maxDoc) throws IOException {
         assert doc == -1;
         doc = 3000;
         FakeScorer fs = new FakeScorer();
@@ -99,7 +99,7 @@
     BooleanScorer bs = new BooleanScorer(weight, false, 1, Arrays.asList(scorers), Collections.<BulkScorer>emptyList(), scorers.length);
 
     final List<Integer> hits = new ArrayList<>();
-    bs.score(new Collector() {
+    bs.score(new SimpleCollector() {
       int docBase;
       @Override
       public void setScorer(Scorer scorer) {
@@ -111,7 +111,7 @@
       }
       
       @Override
-      public void setNextReader(AtomicReaderContext context) {
+      protected void doSetNextReader(AtomicReaderContext context) throws IOException {
         docBase = context.docBase;
       }
       
@@ -138,7 +138,8 @@
     w.addDocument(doc);
     final IndexReader r = w.getReader();
     w.close();
-    final IndexSearcher s = newSearcher(r);
+    // we don't wrap with AssertingIndexSearcher in order to have the original scorer in setScorer.
+    final IndexSearcher s = newSearcher(r, true, false);
 
     final BooleanQuery q = new BooleanQuery();
     for(int term=0;term<33;term++) {
@@ -149,12 +150,12 @@
                             BooleanClause.Occur.SHOULD));
                             
     final int[] count = new int[1];
-    s.search(q, new Collector() {
+    s.search(q, new SimpleCollector() {
     
       @Override
       public void setScorer(Scorer scorer) {
         // Make sure we got BooleanScorer:
-        final Class<?> clazz = scorer instanceof AssertingScorer ? ((AssertingScorer) scorer).getIn().getClass() : scorer.getClass();
+        final Class<?> clazz = scorer.getClass();
         assertEquals("Scorer is implemented by wrong class", FakeScorer.class.getName(), clazz.getName());
       }
       
@@ -164,10 +165,6 @@
       }
       
       @Override
-      public void setNextReader(AtomicReaderContext context) {
-      }
-      
-      @Override
       public boolean acceptsDocsOutOfOrder() {
         return true;
       }
@@ -219,7 +216,7 @@
           return new BulkScorer() {
 
             @Override
-            public boolean score(Collector collector, int max) throws IOException {
+            public boolean score(LeafCollector collector, int max) throws IOException {
               collector.setScorer(new FakeScorer());
               collector.collect(0);
               return false;
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestCachingCollector.java b/lucene/core/src/test/org/apache/lucene/search/TestCachingCollector.java
index f493d42..e842909 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestCachingCollector.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestCachingCollector.java
@@ -17,11 +17,10 @@
  * limitations under the License.
  */
 
-import org.apache.lucene.index.AtomicReaderContext;
-import org.apache.lucene.util.LuceneTestCase;
-
 import java.io.IOException;
 
+import org.apache.lucene.util.LuceneTestCase;
+
 public class TestCachingCollector extends LuceneTestCase {
 
   private static final double ONE_BYTE = 1.0 / (1024 * 1024); // 1 byte out of MB
@@ -53,24 +52,18 @@
     } 
   }
   
-  private static class NoOpCollector extends Collector {
+  private static class NoOpCollector extends SimpleCollector {
 
     private final boolean acceptDocsOutOfOrder;
     
     public NoOpCollector(boolean acceptDocsOutOfOrder) {
       this.acceptDocsOutOfOrder = acceptDocsOutOfOrder;
     }
-    
-    @Override
-    public void setScorer(Scorer scorer) throws IOException {}
 
     @Override
     public void collect(int doc) throws IOException {}
 
     @Override
-    public void setNextReader(AtomicReaderContext context) throws IOException {}
-
-    @Override
     public boolean acceptsDocsOutOfOrder() {
       return acceptDocsOutOfOrder;
     }
@@ -80,24 +73,19 @@
   public void testBasic() throws Exception {
     for (boolean cacheScores : new boolean[] { false, true }) {
       CachingCollector cc = CachingCollector.create(new NoOpCollector(false), cacheScores, 1.0);
-      cc.setScorer(new MockScorer());
+      LeafCollector acc = cc.getLeafCollector(null);
+      acc.setScorer(new MockScorer());
 
       // collect 1000 docs
       for (int i = 0; i < 1000; i++) {
-        cc.collect(i);
+        acc.collect(i);
       }
 
       // now replay them
-      cc.replay(new Collector() {
+      cc.replay(new SimpleCollector() {
         int prevDocID = -1;
 
         @Override
-        public void setScorer(Scorer scorer) {}
-
-        @Override
-        public void setNextReader(AtomicReaderContext context) {}
-
-        @Override
         public void collect(int doc) {
           assertEquals(prevDocID + 1, doc);
           prevDocID = doc;
@@ -113,11 +101,12 @@
   
   public void testIllegalStateOnReplay() throws Exception {
     CachingCollector cc = CachingCollector.create(new NoOpCollector(false), true, 50 * ONE_BYTE);
-    cc.setScorer(new MockScorer());
+    LeafCollector acc = cc.getLeafCollector(null);
+    acc.setScorer(new MockScorer());
     
     // collect 130 docs, this should be enough for triggering cache abort.
     for (int i = 0; i < 130; i++) {
-      cc.collect(i);
+      acc.collect(i);
     }
     
     assertFalse("CachingCollector should not be cached due to low memory limit", cc.isCached());
@@ -135,16 +124,18 @@
     // is valid with the Collector passed to the ctor
     
     // 'src' Collector does not support out-of-order
-    CachingCollector cc = CachingCollector.create(new NoOpCollector(false), true, 50 * ONE_BYTE);
-    cc.setScorer(new MockScorer());
-    for (int i = 0; i < 10; i++) cc.collect(i);
+    CachingCollector cc = CachingCollector.create(new NoOpCollector(false), true, 100 * ONE_BYTE);
+    LeafCollector acc = cc.getLeafCollector(null);
+    acc.setScorer(new MockScorer());
+    for (int i = 0; i < 10; i++) acc.collect(i);
     cc.replay(new NoOpCollector(true)); // this call should not fail
     cc.replay(new NoOpCollector(false)); // this call should not fail
 
     // 'src' Collector supports out-of-order
-    cc = CachingCollector.create(new NoOpCollector(true), true, 50 * ONE_BYTE);
-    cc.setScorer(new MockScorer());
-    for (int i = 0; i < 10; i++) cc.collect(i);
+    cc = CachingCollector.create(new NoOpCollector(true), true, 100 * ONE_BYTE);
+    acc = cc.getLeafCollector(null);
+    acc.setScorer(new MockScorer());
+    for (int i = 0; i < 10; i++) acc.collect(i);
     cc.replay(new NoOpCollector(true)); // this call should not fail
     try {
       cc.replay(new NoOpCollector(false)); // this call should fail
@@ -165,12 +156,13 @@
       int bytesPerDoc = cacheScores ? 8 : 4;
       CachingCollector cc = CachingCollector.create(new NoOpCollector(false),
           cacheScores, bytesPerDoc * ONE_BYTE * numDocs);
-      cc.setScorer(new MockScorer());
-      for (int i = 0; i < numDocs; i++) cc.collect(i);
+      LeafCollector acc = cc.getLeafCollector(null);
+      acc.setScorer(new MockScorer());
+      for (int i = 0; i < numDocs; i++) acc.collect(i);
       assertTrue(cc.isCached());
 
       // The 151's document should terminate caching
-      cc.collect(numDocs);
+      acc.collect(numDocs);
       assertFalse(cc.isCached());
     }
   }
@@ -179,9 +171,9 @@
     for (boolean cacheScores : new boolean[] { false, true }) {
       // create w/ null wrapped collector, and test that the methods work
       CachingCollector cc = CachingCollector.create(true, cacheScores, 50 * ONE_BYTE);
-      cc.setNextReader(null);
-      cc.setScorer(new MockScorer());
-      cc.collect(0);
+      LeafCollector acc = cc.getLeafCollector(null);
+      acc.setScorer(new MockScorer());
+      acc.collect(0);
       
       assertTrue(cc.isCached());
       cc.replay(new NoOpCollector(true));
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestConstantScoreQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestConstantScoreQuery.java
index 21a6a7d..741934f 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestConstantScoreQuery.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestConstantScoreQuery.java
@@ -50,7 +50,7 @@
   
   private void checkHits(IndexSearcher searcher, Query q, final float expectedScore, final String scorerClassName, final String innerScorerClassName) throws IOException {
     final int[] count = new int[1];
-    searcher.search(q, new Collector() {
+    searcher.search(q, new SimpleCollector() {
       private Scorer scorer;
     
       @Override
@@ -70,10 +70,6 @@
       }
       
       @Override
-      public void setNextReader(AtomicReaderContext context) {
-      }
-      
-      @Override
       public boolean acceptsDocsOutOfOrder() {
         return true;
       }
@@ -95,7 +91,8 @@
 
       reader = writer.getReader();
       writer.close();
-      searcher = newSearcher(reader);
+      // we don't wrap with AssertingIndexSearcher in order to have the original scorer in setScorer.
+      searcher = newSearcher(reader, true, false);
       
       // set a similarity that does not normalize our boost away
       searcher.setSimilarity(new DefaultSimilarity() {
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestDocBoost.java b/lucene/core/src/test/org/apache/lucene/search/TestDocBoost.java
index 3cf2e85..aced6f6 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestDocBoost.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestDocBoost.java
@@ -59,7 +59,7 @@
     IndexSearcher searcher = newSearcher(reader);
     searcher.search
       (new TermQuery(new Term("field", "word")),
-       new Collector() {
+       new SimpleCollector() {
          private int base = 0;
          private Scorer scorer;
          @Override
@@ -71,7 +71,7 @@
            scores[doc + base] = scorer.score();
          }
          @Override
-         public void setNextReader(AtomicReaderContext context) {
+         protected void doSetNextReader(AtomicReaderContext context) throws IOException {
            base = context.docBase;
          }
          @Override
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestEarlyTermination.java b/lucene/core/src/test/org/apache/lucene/search/TestEarlyTermination.java
index acbdaf7..7388d00 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestEarlyTermination.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestEarlyTermination.java
@@ -58,15 +58,12 @@
 
     for (int i = 0; i < iters; ++i) {
       final IndexSearcher searcher = newSearcher(reader);
-      final Collector collector = new Collector() {
+      final Collector collector = new SimpleCollector() {
 
         final boolean outOfOrder = random().nextBoolean();
         boolean collectionTerminated = true;
 
         @Override
-        public void setScorer(Scorer scorer) throws IOException {}
-
-        @Override
         public void collect(int doc) throws IOException {
           assertFalse(collectionTerminated);
           if (rarely()) {
@@ -76,7 +73,7 @@
         }
 
         @Override
-        public void setNextReader(AtomicReaderContext context) throws IOException {
+        protected void doSetNextReader(AtomicReaderContext context) throws IOException {
           if (random().nextBoolean()) {
             collectionTerminated = true;
             throw new CollectionTerminatedException();
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestMultiTermConstantScore.java b/lucene/core/src/test/org/apache/lucene/search/TestMultiTermConstantScore.java
index d286d54..9f5f438 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestMultiTermConstantScore.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestMultiTermConstantScore.java
@@ -226,7 +226,7 @@
     search.setSimilarity(new DefaultSimilarity());
     Query q = csrq("data", "1", "6", T, T);
     q.setBoost(100);
-    search.search(q, null, new Collector() {
+    search.search(q, null, new SimpleCollector() {
       private int base = 0;
       private Scorer scorer;
       @Override
@@ -238,7 +238,7 @@
         assertEquals("score for doc " + (doc + base) + " was not correct", 1.0f, scorer.score(), SCORE_COMP_THRESH);
       }
       @Override
-      public void setNextReader(AtomicReaderContext context) {
+      protected void doSetNextReader(AtomicReaderContext context) throws IOException {
         base = context.docBase;
       }
       @Override
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestPositiveScoresOnlyCollector.java b/lucene/core/src/test/org/apache/lucene/search/TestPositiveScoresOnlyCollector.java
index 1f52b21..4a51978 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestPositiveScoresOnlyCollector.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestPositiveScoresOnlyCollector.java
@@ -22,6 +22,7 @@
 import org.apache.lucene.index.Term;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.document.Document;
 
 public class TestPositiveScoresOnlyCollector extends LuceneTestCase {
 
@@ -78,6 +79,7 @@
     
     Directory directory = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), directory);
+    writer.addDocument(new Document());
     writer.commit();
     IndexReader ir = writer.getReader();
     writer.close();
@@ -86,9 +88,10 @@
     Scorer s = new SimpleScorer(fake);
     TopDocsCollector<ScoreDoc> tdc = TopScoreDocCollector.create(scores.length, true);
     Collector c = new PositiveScoresOnlyCollector(tdc);
-    c.setScorer(s);
+    LeafCollector ac = c.getLeafCollector(ir.leaves().get(0));
+    ac.setScorer(s);
     while (s.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
-      c.collect(0);
+      ac.collect(0);
     }
     TopDocs td = tdc.topDocs();
     ScoreDoc[] sd = td.scoreDocs;
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestScoreCachingWrappingScorer.java b/lucene/core/src/test/org/apache/lucene/search/TestScoreCachingWrappingScorer.java
index 9c6f486..c48c5f1 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestScoreCachingWrappingScorer.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestScoreCachingWrappingScorer.java
@@ -65,7 +65,7 @@
     }
   }
   
-  private static final class ScoreCachingCollector extends Collector {
+  private static final class ScoreCachingCollector extends SimpleCollector {
 
     private int idx = 0;
     private Scorer scorer;
@@ -88,9 +88,6 @@
       ++idx;
     }
 
-    @Override public void setNextReader(AtomicReaderContext context) {
-    }
-
     @Override public void setScorer(Scorer scorer) {
       this.scorer = new ScoreCachingWrappingScorer(scorer);
     }
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestScorerPerf.java b/lucene/core/src/test/org/apache/lucene/search/TestScorerPerf.java
index 48cbaee..97dcc44 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestScorerPerf.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestScorerPerf.java
@@ -97,13 +97,10 @@
     return sets;
   }
 
-  public static class CountingHitCollector extends Collector {
+  public static class CountingHitCollector extends SimpleCollector {
     int count=0;
     int sum=0;
     protected int docBase = 0;
-
-    @Override
-    public void setScorer(Scorer scorer) throws IOException {}
     
     @Override
     public void collect(int doc) {
@@ -115,7 +112,7 @@
     public int getSum() { return sum; }
 
     @Override
-    public void setNextReader(AtomicReaderContext context) {
+    protected void doSetNextReader(AtomicReaderContext context) throws IOException {
       docBase = context.docBase;
     }
     @Override
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestSimilarity.java b/lucene/core/src/test/org/apache/lucene/search/TestSimilarity.java
index 31ebc14..3a3c580 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestSimilarity.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestSimilarity.java
@@ -76,7 +76,7 @@
     Term b = new Term("field", "b");
     Term c = new Term("field", "c");
 
-    searcher.search(new TermQuery(b), new Collector() {
+    searcher.search(new TermQuery(b), new SimpleCollector() {
          private Scorer scorer;
          @Override
         public void setScorer(Scorer scorer) {
@@ -86,9 +86,6 @@
         public final void collect(int doc) throws IOException {
            assertEquals(1.0f, scorer.score(), 0);
          }
-         @Override
-        public void setNextReader(AtomicReaderContext context) {}
-         @Override
         public boolean acceptsDocsOutOfOrder() {
            return true;
          }
@@ -98,7 +95,7 @@
     bq.add(new TermQuery(a), BooleanClause.Occur.SHOULD);
     bq.add(new TermQuery(b), BooleanClause.Occur.SHOULD);
     //System.out.println(bq.toString("field"));
-    searcher.search(bq, new Collector() {
+    searcher.search(bq, new SimpleCollector() {
          private int base = 0;
          private Scorer scorer;
          @Override
@@ -111,7 +108,7 @@
            assertEquals((float)doc+base+1, scorer.score(), 0);
          }
          @Override
-        public void setNextReader(AtomicReaderContext context) {
+         protected void doSetNextReader(AtomicReaderContext context) throws IOException {
            base = context.docBase;
          }
          @Override
@@ -125,7 +122,7 @@
     pq.add(c);
     //System.out.println(pq.toString("field"));
     searcher.search(pq,
-       new Collector() {
+       new SimpleCollector() {
          private Scorer scorer;
          @Override
          public void setScorer(Scorer scorer) {
@@ -137,8 +134,6 @@
            assertEquals(1.0f, scorer.score(), 0);
          }
          @Override
-         public void setNextReader(AtomicReaderContext context) {}
-         @Override
          public boolean acceptsDocsOutOfOrder() {
            return true;
          }
@@ -146,7 +141,7 @@
 
     pq.setSlop(2);
     //System.out.println(pq.toString("field"));
-    searcher.search(pq, new Collector() {
+    searcher.search(pq, new SimpleCollector() {
       private Scorer scorer;
       @Override
       public void setScorer(Scorer scorer) {
@@ -158,8 +153,6 @@
         assertEquals(2.0f, scorer.score(), 0);
       }
       @Override
-      public void setNextReader(AtomicReaderContext context) {}
-      @Override
       public boolean acceptsDocsOutOfOrder() {
         return true;
       }
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestSloppyPhraseQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestSloppyPhraseQuery.java
index fdde532..8567419 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestSloppyPhraseQuery.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestSloppyPhraseQuery.java
@@ -175,7 +175,7 @@
     return query;
   }
 
-  static class MaxFreqCollector extends Collector {
+  static class MaxFreqCollector extends SimpleCollector {
     float max;
     int totalHits;
     Scorer scorer;
@@ -192,10 +192,6 @@
     }
 
     @Override
-    public void setNextReader(AtomicReaderContext context) throws IOException {      
-    }
-
-    @Override
     public boolean acceptsDocsOutOfOrder() {
       return false;
     }
@@ -203,7 +199,7 @@
   
   /** checks that no scores or freqs are infinite */
   private void assertSaneScoring(PhraseQuery pq, IndexSearcher searcher) throws Exception {
-    searcher.search(pq, new Collector() {
+    searcher.search(pq, new SimpleCollector() {
       Scorer scorer;
       
       @Override
@@ -218,11 +214,6 @@
       }
       
       @Override
-      public void setNextReader(AtomicReaderContext context) {
-        // do nothing
-      }
-      
-      @Override
       public boolean acceptsDocsOutOfOrder() {
         return false;
       }
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestSubScorerFreqs.java b/lucene/core/src/test/org/apache/lucene/search/TestSubScorerFreqs.java
index 063d26b..def8988 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestSubScorerFreqs.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestSubScorerFreqs.java
@@ -65,10 +65,7 @@
     dir = null;
   }
 
-  private static class CountingCollector extends Collector {
-    private final Collector other;
-    private int docBase;
-
+  private static class CountingCollector extends FilterCollector {
     public final Map<Integer, Map<Query, Float>> docCounts = new HashMap<>();
 
     private final Map<Query, Scorer> subScorers = new HashMap<>();
@@ -79,16 +76,9 @@
     }
 
     public CountingCollector(Collector other, Set<String> relationships) {
-      this.other = other;
+      super(other);
       this.relationships = relationships;
     }
-
-    @Override
-    public void setScorer(Scorer scorer) throws IOException {
-      other.setScorer(scorer);
-      subScorers.clear();
-      setSubScorers(scorer, "TOP");
-    }
     
     public void setSubScorers(Scorer scorer, String relationship) {
       for (ChildScorer child : scorer.getChildren()) {
@@ -98,30 +88,34 @@
       }
       subScorers.put(scorer.getWeight().getQuery(), scorer);
     }
-
-    @Override
-    public void collect(int doc) throws IOException {
-      final Map<Query, Float> freqs = new HashMap<>();
-      for (Map.Entry<Query, Scorer> ent : subScorers.entrySet()) {
-        Scorer value = ent.getValue();
-        int matchId = value.docID();
-        freqs.put(ent.getKey(), matchId == doc ? value.freq() : 0.0f);
-      }
-      docCounts.put(doc + docBase, freqs);
-      other.collect(doc);
-    }
-
-    @Override
-    public void setNextReader(AtomicReaderContext context)
+    
+    public LeafCollector getLeafCollector(AtomicReaderContext context)
         throws IOException {
-      docBase = context.docBase;
-      other.setNextReader(context);
+      final int docBase = context.docBase;
+      return new FilterLeafCollector(super.getLeafCollector(context)) {
+        
+        @Override
+        public void collect(int doc) throws IOException {
+          final Map<Query, Float> freqs = new HashMap<Query, Float>();
+          for (Map.Entry<Query, Scorer> ent : subScorers.entrySet()) {
+            Scorer value = ent.getValue();
+            int matchId = value.docID();
+            freqs.put(ent.getKey(), matchId == doc ? value.freq() : 0.0f);
+          }
+          docCounts.put(doc + docBase, freqs);
+          super.collect(doc);
+        }
+        
+        @Override
+        public void setScorer(Scorer scorer) throws IOException {
+          super.setScorer(scorer);
+          subScorers.clear();
+          setSubScorers(scorer, "TOP");
+        }
+        
+      };
     }
 
-    @Override
-    public boolean acceptsDocsOutOfOrder() {
-      return other.acceptsDocsOutOfOrder();
-    }
   }
 
   private static final float FLOAT_TOLERANCE = 0.00001F;
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestTermScorer.java b/lucene/core/src/test/org/apache/lucene/search/TestTermScorer.java
index 6b4f474..44faa83 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestTermScorer.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestTermScorer.java
@@ -84,7 +84,7 @@
     final List<TestHit> docs = new ArrayList<>();
     // must call next first
     
-    ts.score(new Collector() {
+    ts.score(new SimpleCollector() {
       private int base = 0;
       private Scorer scorer;
       
@@ -104,7 +104,7 @@
       }
       
       @Override
-      public void setNextReader(AtomicReaderContext context) {
+      protected void doSetNextReader(AtomicReaderContext context) throws IOException {
         base = context.docBase;
       }
       
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestTimeLimitingCollector.java b/lucene/core/src/test/org/apache/lucene/search/TestTimeLimitingCollector.java
index 537e30f..85239c6 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestTimeLimitingCollector.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestTimeLimitingCollector.java
@@ -307,7 +307,7 @@
   }
   
   // counting collector that can slow down at collect().
-  private class MyHitCollector extends Collector {
+  private class MyHitCollector extends SimpleCollector {
     private final BitSet bits = new BitSet();
     private int slowdown = 0;
     private int lastDocCollected = -1;
@@ -349,7 +349,7 @@
     }
     
     @Override
-    public void setNextReader(AtomicReaderContext context) {
+    protected void doSetNextReader(AtomicReaderContext context) throws IOException {
       docBase = context.docBase;
     }
     
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestTopDocsCollector.java b/lucene/core/src/test/org/apache/lucene/search/TestTopDocsCollector.java
index f8f728f..0c56e11 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestTopDocsCollector.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestTopDocsCollector.java
@@ -61,7 +61,7 @@
     }
 
     @Override
-    public void setNextReader(AtomicReaderContext context) {
+    protected void doSetNextReader(AtomicReaderContext context) throws IOException {
       base = context.docBase;
     }
 
diff --git a/lucene/core/src/test/org/apache/lucene/store/TestDirectory.java b/lucene/core/src/test/org/apache/lucene/store/TestDirectory.java
index e500a0d..e26b66f 100644
--- a/lucene/core/src/test/org/apache/lucene/store/TestDirectory.java
+++ b/lucene/core/src/test/org/apache/lucene/store/TestDirectory.java
@@ -22,6 +22,7 @@
 import java.io.IOException;
 import java.nio.file.NoSuchFileException;
 import java.util.Arrays;
+import java.util.Collections;
 
 import org.apache.lucene.store.MockDirectoryWrapper.Throttling;
 import org.apache.lucene.util.LuceneTestCase;
@@ -288,5 +289,34 @@
       TestUtil.rm(path);
     }
   }
+  
+  public void testFsyncDoesntCreateNewFiles() throws Exception {
+    File path = TestUtil.getTempDir("nocreate");
+    Directory fsdir = new SimpleFSDirectory(path);
+    
+    // write a file
+    IndexOutput out = fsdir.createOutput("afile", newIOContext(random()));
+    out.writeString("boo");
+    out.close();
+    
+    // delete it
+    assertTrue(new File(path, "afile").delete());
+    
+    // directory is empty
+    assertEquals(0, fsdir.listAll().length);
+    
+    // fsync it
+    try {
+      fsdir.sync(Collections.singleton("afile"));
+      fail("didn't get expected exception, instead fsync created new files: " + Arrays.asList(fsdir.listAll()));
+    } catch (FileNotFoundException | NoSuchFileException expected) {
+      // ok
+    }
+    
+    // directory is still empty
+    assertEquals(0, fsdir.listAll().length);
+    
+    fsdir.close();
+  }
 }
 
diff --git a/lucene/core/src/test/org/apache/lucene/util/junitcompat/TestFailIfUnreferencedFiles.java b/lucene/core/src/test/org/apache/lucene/util/junitcompat/TestFailIfUnreferencedFiles.java
index d8068f0..0090e6d 100644
--- a/lucene/core/src/test/org/apache/lucene/util/junitcompat/TestFailIfUnreferencedFiles.java
+++ b/lucene/core/src/test/org/apache/lucene/util/junitcompat/TestFailIfUnreferencedFiles.java
@@ -22,15 +22,14 @@
 import org.apache.lucene.document.Document;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
-import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;
 import org.apache.lucene.store.IndexOutput;
+import org.apache.lucene.store.MockDirectoryWrapper;
 import org.junit.Assert;
 import org.junit.Test;
 import org.junit.runner.JUnitCore;
 import org.junit.runner.Result;
 import org.junit.runner.notification.Failure;
-
 import com.carrotsearch.randomizedtesting.RandomizedTest;
 
 // LUCENE-4456: Test that we fail if there are unreferenced files
@@ -41,7 +40,8 @@
   
   public static class Nested1 extends WithNestedTests.AbstractNestedTest {
     public void testDummy() throws Exception {
-      Directory dir = newMockDirectory();
+      MockDirectoryWrapper dir = newMockDirectory();
+      dir.setAssertNoUnrefencedFilesOnClose(true);
       IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, null));
       iw.addDocument(new Document());
       iw.close();
diff --git a/lucene/facet/src/java/org/apache/lucene/facet/DrillSideways.java b/lucene/facet/src/java/org/apache/lucene/facet/DrillSideways.java
index 01b3b6c..c8614f3 100644
--- a/lucene/facet/src/java/org/apache/lucene/facet/DrillSideways.java
+++ b/lucene/facet/src/java/org/apache/lucene/facet/DrillSideways.java
@@ -26,6 +26,7 @@
 import org.apache.lucene.facet.sortedset.SortedSetDocValuesReaderState;
 import org.apache.lucene.facet.taxonomy.FastTaxonomyFacetCounts;
 import org.apache.lucene.facet.taxonomy.TaxonomyReader;
+import org.apache.lucene.search.LeafCollector;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.Collector;
@@ -238,7 +239,7 @@
    *  default is false.  Note that if you return true from
    *  this method (in a subclass) be sure your collector
    *  also returns false from {@link
-   *  Collector#acceptsDocsOutOfOrder}: this will trick
+   *  LeafCollector#acceptsDocsOutOfOrder}: this will trick
    *  {@code BooleanQuery} into also scoring all subDocs at
    *  once. */
   protected boolean scoreSubDocsAtOnce() {
diff --git a/lucene/facet/src/java/org/apache/lucene/facet/DrillSidewaysScorer.java b/lucene/facet/src/java/org/apache/lucene/facet/DrillSidewaysScorer.java
index 1d08bf7..273b6b1 100644
--- a/lucene/facet/src/java/org/apache/lucene/facet/DrillSidewaysScorer.java
+++ b/lucene/facet/src/java/org/apache/lucene/facet/DrillSidewaysScorer.java
@@ -23,6 +23,7 @@
 
 import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.search.LeafCollector;
 import org.apache.lucene.search.Collector;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.Scorer;
@@ -36,6 +37,7 @@
   //private static boolean DEBUG = false;
 
   private final Collector drillDownCollector;
+  private LeafCollector drillDownLeafCollector;
 
   private final DocsAndCost[] dims;
 
@@ -62,7 +64,7 @@
   }
 
   @Override
-  public boolean score(Collector collector, int maxDoc) throws IOException {
+  public boolean score(LeafCollector collector, int maxDoc) throws IOException {
     if (maxDoc != Integer.MAX_VALUE) {
       throw new IllegalArgumentException("maxDoc must be Integer.MAX_VALUE");
     }
@@ -73,12 +75,14 @@
     FakeScorer scorer = new FakeScorer();
     collector.setScorer(scorer);
     if (drillDownCollector != null) {
-      drillDownCollector.setScorer(scorer);
-      drillDownCollector.setNextReader(context);
+      drillDownLeafCollector = drillDownCollector.getLeafCollector(context);
+      drillDownLeafCollector.setScorer(scorer);
+    } else {
+      drillDownLeafCollector = null;
     }
     for (DocsAndCost dim : dims) {
-      dim.sidewaysCollector.setScorer(scorer);
-      dim.sidewaysCollector.setNextReader(context);
+      dim.sidewaysLeafCollector = dim.sidewaysCollector.getLeafCollector(context);
+      dim.sidewaysLeafCollector.setScorer(scorer);
     }
 
     // TODO: if we ever allow null baseScorer ... it will
@@ -100,10 +104,10 @@
     final int numDims = dims.length;
 
     Bits[] bits = new Bits[numBits];
-    Collector[] bitsSidewaysCollectors = new Collector[numBits];
+    LeafCollector[] bitsSidewaysCollectors = new LeafCollector[numBits];
 
     DocIdSetIterator[] disis = new DocIdSetIterator[numDims-numBits];
-    Collector[] sidewaysCollectors = new Collector[numDims-numBits];
+    LeafCollector[] sidewaysCollectors = new LeafCollector[numDims-numBits];
     long drillDownCost = 0;
     int disiUpto = 0;
     int bitsUpto = 0;
@@ -111,14 +115,14 @@
       DocIdSetIterator disi = dims[dim].disi;
       if (dims[dim].bits == null) {
         disis[disiUpto] = disi;
-        sidewaysCollectors[disiUpto] = dims[dim].sidewaysCollector;
+        sidewaysCollectors[disiUpto] = dims[dim].sidewaysLeafCollector;
         disiUpto++;
         if (disi != null) {
           drillDownCost += disi.cost();
         }
       } else {
         bits[bitsUpto] = dims[dim].bits;
-        bitsSidewaysCollectors[bitsUpto] = dims[dim].sidewaysCollector;
+        bitsSidewaysCollectors[bitsUpto] = dims[dim].sidewaysLeafCollector;
         bitsUpto++;
       }
     }
@@ -154,15 +158,15 @@
    *  (i.e., like BooleanScorer2, not BooleanScorer).  In
    *  this case we just .next() on base and .advance() on
    *  the dim filters. */ 
-  private void doQueryFirstScoring(Collector collector, DocIdSetIterator[] disis, Collector[] sidewaysCollectors,
-                                   Bits[] bits, Collector[] bitsSidewaysCollectors) throws IOException {
+  private void doQueryFirstScoring(LeafCollector collector, DocIdSetIterator[] disis, LeafCollector[] sidewaysCollectors,
+                                   Bits[] bits, LeafCollector[] bitsSidewaysCollectors) throws IOException {
     //if (DEBUG) {
     //  System.out.println("  doQueryFirstScoring");
     //}
     int docID = baseScorer.docID();
 
     nextDoc: while (docID != DocsEnum.NO_MORE_DOCS) {
-      Collector failedCollector = null;
+      LeafCollector failedCollector = null;
       for (int i=0;i<disis.length;i++) {
         // TODO: should we sort this 2nd dimension of
         // docsEnums from most frequent to least?
@@ -225,7 +229,7 @@
 
   /** Used when drill downs are highly constraining vs
    *  baseQuery. */
-  private void doDrillDownAdvanceScoring(Collector collector, DocIdSetIterator[] disis, Collector[] sidewaysCollectors) throws IOException {
+  private void doDrillDownAdvanceScoring(LeafCollector collector, DocIdSetIterator[] disis, LeafCollector[] sidewaysCollectors) throws IOException {
     final int maxDoc = context.reader().maxDoc();
     final int numDims = dims.length;
 
@@ -423,7 +427,7 @@
     }
   }
 
-  private void doUnionScoring(Collector collector, DocIdSetIterator[] disis, Collector[] sidewaysCollectors) throws IOException {
+  private void doUnionScoring(LeafCollector collector, DocIdSetIterator[] disis, LeafCollector[] sidewaysCollectors) throws IOException {
     //if (DEBUG) {
     //  System.out.println("  doUnionScoring");
     //}
@@ -569,14 +573,14 @@
     }
   }
 
-  private void collectHit(Collector collector, Collector[] sidewaysCollectors) throws IOException {
+  private void collectHit(LeafCollector collector, LeafCollector[] sidewaysCollectors) throws IOException {
     //if (DEBUG) {
     //  System.out.println("      hit");
     //}
 
     collector.collect(collectDocID);
     if (drillDownCollector != null) {
-      drillDownCollector.collect(collectDocID);
+      drillDownLeafCollector.collect(collectDocID);
     }
 
     // TODO: we could "fix" faceting of the sideways counts
@@ -589,14 +593,14 @@
     }
   }
 
-  private void collectHit(Collector collector, Collector[] sidewaysCollectors, Collector[] sidewaysCollectors2) throws IOException {
+  private void collectHit(LeafCollector collector, LeafCollector[] sidewaysCollectors, LeafCollector[] sidewaysCollectors2) throws IOException {
     //if (DEBUG) {
     //  System.out.println("      hit");
     //}
 
     collector.collect(collectDocID);
     if (drillDownCollector != null) {
-      drillDownCollector.collect(collectDocID);
+      drillDownLeafCollector.collect(collectDocID);
     }
 
     // TODO: we could "fix" faceting of the sideways counts
@@ -612,7 +616,7 @@
     }
   }
 
-  private void collectNearMiss(Collector sidewaysCollector) throws IOException {
+  private void collectNearMiss(LeafCollector sidewaysCollector) throws IOException {
     //if (DEBUG) {
     //  System.out.println("      missingDim=" + dim);
     //}
@@ -620,8 +624,6 @@
   }
 
   private final class FakeScorer extends Scorer {
-    float score;
-    int doc;
 
     public FakeScorer() {
       super(null);
@@ -674,6 +676,7 @@
     // Random access bits:
     Bits bits;
     Collector sidewaysCollector;
+    LeafCollector sidewaysLeafCollector;
     String dim;
 
     @Override
diff --git a/lucene/facet/src/java/org/apache/lucene/facet/FacetsCollector.java b/lucene/facet/src/java/org/apache/lucene/facet/FacetsCollector.java
index 912725d..90bbba6 100644
--- a/lucene/facet/src/java/org/apache/lucene/facet/FacetsCollector.java
+++ b/lucene/facet/src/java/org/apache/lucene/facet/FacetsCollector.java
@@ -32,6 +32,7 @@
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.SimpleCollector;
 import org.apache.lucene.search.Sort;
 import org.apache.lucene.search.TopDocs;
 import org.apache.lucene.search.TopDocsCollector;
@@ -47,7 +48,7 @@
  *  counting.  Use the {@code search} utility methods to
  *  perform an "ordinary" search but also collect into a
  *  {@link Collector}. */
-public class FacetsCollector extends Collector {
+public class FacetsCollector extends SimpleCollector {
 
   private AtomicReaderContext context;
   private Scorer scorer;
@@ -151,7 +152,7 @@
 
     return matchingDocs;
   }
-    
+
   @Override
   public final boolean acceptsDocsOutOfOrder() {
     // If we are keeping scores then we require in-order
@@ -180,7 +181,7 @@
   }
     
   @Override
-  public final void setNextReader(AtomicReaderContext context) throws IOException {
+  protected void doSetNextReader(AtomicReaderContext context) throws IOException {
     if (docs != null) {
       matchingDocs.add(new MatchingDocs(this.context, docs.getDocIdSet(), totalHits, scores));
     }
diff --git a/lucene/facet/src/test/org/apache/lucene/facet/AssertingSubDocsAtOnceCollector.java b/lucene/facet/src/test/org/apache/lucene/facet/AssertingSubDocsAtOnceCollector.java
index 644b3ad..df104b0 100644
--- a/lucene/facet/src/test/org/apache/lucene/facet/AssertingSubDocsAtOnceCollector.java
+++ b/lucene/facet/src/test/org/apache/lucene/facet/AssertingSubDocsAtOnceCollector.java
@@ -20,14 +20,13 @@
 import java.util.ArrayList;
 import java.util.List;
 
-import org.apache.lucene.index.AtomicReaderContext;
-import org.apache.lucene.search.Collector;
-import org.apache.lucene.search.Scorer.ChildScorer;
 import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.Scorer.ChildScorer;
+import org.apache.lucene.search.SimpleCollector;
 
 /** Verifies in collect() that all child subScorers are on
  *  the collected doc. */
-class AssertingSubDocsAtOnceCollector extends Collector {
+class AssertingSubDocsAtOnceCollector extends SimpleCollector {
 
   // TODO: allow wrapping another Collector
 
@@ -57,10 +56,6 @@
   }
 
   @Override
-  public void setNextReader(AtomicReaderContext context) {
-  }
-
-  @Override
   public boolean acceptsDocsOutOfOrder() {
     return false;
   }
diff --git a/lucene/facet/src/test/org/apache/lucene/facet/TestDrillSideways.java b/lucene/facet/src/test/org/apache/lucene/facet/TestDrillSideways.java
index a6875cb..6847da7 100644
--- a/lucene/facet/src/test/org/apache/lucene/facet/TestDrillSideways.java
+++ b/lucene/facet/src/test/org/apache/lucene/facet/TestDrillSideways.java
@@ -43,6 +43,7 @@
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
+import org.apache.lucene.search.LeafCollector;
 import org.apache.lucene.search.Collector;
 import org.apache.lucene.search.DocIdSet;
 import org.apache.lucene.search.Filter;
@@ -51,6 +52,7 @@
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.SimpleCollector;
 import org.apache.lucene.search.Sort;
 import org.apache.lucene.search.SortField;
 import org.apache.lucene.search.TermQuery;
@@ -666,21 +668,17 @@
       // had an AssertingScorer it could catch it when
       // Weight.scoresDocsOutOfOrder lies!:
       new DrillSideways(s, config, tr).search(ddq,
-                           new Collector() {
+                           new SimpleCollector() {
                              int lastDocID;
 
                              @Override
-                             public void setScorer(Scorer s) {
-                             }
-
-                             @Override
                              public void collect(int doc) {
                                assert doc > lastDocID;
                                lastDocID = doc;
                              }
 
                              @Override
-                             public void setNextReader(AtomicReaderContext context) {
+                             protected void doSetNextReader(AtomicReaderContext context) throws IOException {
                                lastDocID = -1;
                              }
 
diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/AbstractAllGroupHeadsCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/AbstractAllGroupHeadsCollector.java
index 31abf0b..be6d8f8 100644
--- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/AbstractAllGroupHeadsCollector.java
+++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/AbstractAllGroupHeadsCollector.java
@@ -17,20 +17,20 @@
  * limitations under the License.
  */
 
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.search.Collector;
-import org.apache.lucene.util.FixedBitSet;
-
 import java.io.IOException;
 import java.util.Collection;
 
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.search.SimpleCollector;
+import org.apache.lucene.util.FixedBitSet;
+
 /**
  * This collector specializes in collecting the most relevant document (group head) for each group that match the query.
  *
  * @lucene.experimental
  */
 @SuppressWarnings({"unchecked","rawtypes"})
-public abstract class AbstractAllGroupHeadsCollector<GH extends AbstractAllGroupHeadsCollector.GroupHead> extends Collector {
+public abstract class AbstractAllGroupHeadsCollector<GH extends AbstractAllGroupHeadsCollector.GroupHead> extends SimpleCollector {
 
   protected final int[] reversed;
   protected final int compIDXEnd;
diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/AbstractAllGroupsCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/AbstractAllGroupsCollector.java
index 3cd9164..1677eca 100644
--- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/AbstractAllGroupsCollector.java
+++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/AbstractAllGroupsCollector.java
@@ -17,13 +17,13 @@
  * limitations under the License.
  */
 
-import org.apache.lucene.search.Collector;
-import org.apache.lucene.search.Scorer;
-import org.apache.lucene.util.BytesRef;
-
 import java.io.IOException;
 import java.util.Collection;
 
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.SimpleCollector;
+import org.apache.lucene.util.BytesRef;
+
 /**
  * A collector that collects all groups that match the
  * query. Only the group value is collected, and the order
@@ -36,7 +36,7 @@
  *
  * @lucene.experimental
  */
-public abstract class AbstractAllGroupsCollector<GROUP_VALUE_TYPE> extends Collector {
+public abstract class AbstractAllGroupsCollector<GROUP_VALUE_TYPE> extends SimpleCollector {
 
   /**
    * Returns the total number of groups for the executed search.
diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/AbstractDistinctValuesCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/AbstractDistinctValuesCollector.java
index 07fc35e..a735caf 100644
--- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/AbstractDistinctValuesCollector.java
+++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/AbstractDistinctValuesCollector.java
@@ -17,18 +17,18 @@
  * limitations under the License.
  */
 
-import org.apache.lucene.search.Collector;
-import org.apache.lucene.search.Scorer;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
 
-import java.io.IOException;
-import java.util.*;
+import org.apache.lucene.search.SimpleCollector;
 
 /**
  * A second pass grouping collector that keeps track of distinct values for a specified field for the top N group.
  *
  * @lucene.experimental
  */
-public abstract class AbstractDistinctValuesCollector<GC extends AbstractDistinctValuesCollector.GroupCount<?>> extends Collector {
+public abstract class AbstractDistinctValuesCollector<GC extends AbstractDistinctValuesCollector.GroupCount<?>> extends SimpleCollector {
 
   /**
    * Returns all unique values for each top N group.
@@ -42,10 +42,6 @@
     return true;
   }
 
-  @Override
-  public void setScorer(Scorer scorer) throws IOException {
-  }
-
   /**
    * Returned by {@link AbstractDistinctValuesCollector#getGroups()},
    * representing the value and set of distinct values for the group.
diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/AbstractFirstPassGroupingCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/AbstractFirstPassGroupingCollector.java
index 19b1d36..0c342ec 100644
--- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/AbstractFirstPassGroupingCollector.java
+++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/AbstractFirstPassGroupingCollector.java
@@ -33,7 +33,7 @@
  *
  * @lucene.experimental
  */
-abstract public class AbstractFirstPassGroupingCollector<GROUP_VALUE_TYPE> extends Collector {
+abstract public class AbstractFirstPassGroupingCollector<GROUP_VALUE_TYPE> extends SimpleCollector {
 
   private final Sort groupSort;
   private final FieldComparator<?>[] comparators;
@@ -326,7 +326,7 @@
   }
 
   @Override
-  public void setNextReader(AtomicReaderContext readerContext) throws IOException {
+  protected void doSetNextReader(AtomicReaderContext readerContext) throws IOException {
     docBase = readerContext.docBase;
     for (int i=0; i<comparators.length; i++) {
       comparators[i] = comparators[i].setNextReader(readerContext);
diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/AbstractGroupFacetCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/AbstractGroupFacetCollector.java
index 8db044c..016f393 100644
--- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/AbstractGroupFacetCollector.java
+++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/AbstractGroupFacetCollector.java
@@ -19,6 +19,7 @@
 
 import org.apache.lucene.search.Collector;
 import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.SimpleCollector;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.PriorityQueue;
 
@@ -30,7 +31,7 @@
  *
  * @lucene.experimental
  */
-public abstract class AbstractGroupFacetCollector extends Collector {
+public abstract class AbstractGroupFacetCollector extends SimpleCollector {
 
   protected final String groupField;
   protected final String facetField;
diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/AbstractSecondPassGroupingCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/AbstractSecondPassGroupingCollector.java
index 7b00012..aedfa9e 100644
--- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/AbstractSecondPassGroupingCollector.java
+++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/AbstractSecondPassGroupingCollector.java
@@ -37,7 +37,7 @@
  *
  * @lucene.experimental
  */
-public abstract class AbstractSecondPassGroupingCollector<GROUP_VALUE_TYPE> extends Collector {
+public abstract class AbstractSecondPassGroupingCollector<GROUP_VALUE_TYPE> extends SimpleCollector {
 
   protected final Map<GROUP_VALUE_TYPE, SearchGroupDocs<GROUP_VALUE_TYPE>> groupMap;
   private final int maxDocsPerGroup;
@@ -107,10 +107,10 @@
   protected abstract SearchGroupDocs<GROUP_VALUE_TYPE> retrieveGroup(int doc) throws IOException;
 
   @Override
-  public void setNextReader(AtomicReaderContext readerContext) throws IOException {
+  protected void doSetNextReader(AtomicReaderContext readerContext) throws IOException {
     //System.out.println("SP.setNextReader");
     for (SearchGroupDocs<GROUP_VALUE_TYPE> group : groupMap.values()) {
-      group.collector.setNextReader(readerContext);
+      group.collector.getLeafCollector(readerContext);
     }
   }
 
diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/BlockGroupingCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/BlockGroupingCollector.java
index 195ec23..7c33583 100644
--- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/BlockGroupingCollector.java
+++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/BlockGroupingCollector.java
@@ -55,7 +55,7 @@
  * @lucene.experimental
  */
 
-public class BlockGroupingCollector extends Collector {
+public class BlockGroupingCollector extends SimpleCollector {
 
   private int[] pendingSubDocs;
   private float[] pendingSubScores;
@@ -350,7 +350,7 @@
       }
 
       collector.setScorer(fakeScorer);
-      collector.setNextReader(og.readerContext);
+      collector.getLeafCollector(og.readerContext);
       for(int docIDX=0;docIDX<og.count;docIDX++) {
         final int doc = og.docs[docIDX];
         fakeScorer.doc = doc;
@@ -516,7 +516,7 @@
   }
 
   @Override
-  public void setNextReader(AtomicReaderContext readerContext) throws IOException {
+  protected void doSetNextReader(AtomicReaderContext readerContext) throws IOException {
     if (subDocUpto != 0) {
       processGroup();
     }
diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/function/FunctionAllGroupHeadsCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/function/FunctionAllGroupHeadsCollector.java
index 8372ac5..64ad845 100644
--- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/function/FunctionAllGroupHeadsCollector.java
+++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/function/FunctionAllGroupHeadsCollector.java
@@ -20,6 +20,7 @@
 import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.ValueSource;
+import org.apache.lucene.search.LeafCollector;
 import org.apache.lucene.search.FieldComparator;
 import org.apache.lucene.search.Scorer;
 import org.apache.lucene.search.Sort;
@@ -101,7 +102,7 @@
   }
 
   @Override
-  public void setNextReader(AtomicReaderContext context) throws IOException {
+  protected void doSetNextReader(AtomicReaderContext context) throws IOException {
     this.readerContext = context;
     FunctionValues values = groupBy.getValues(vsContext, context);
     filler = values.getValueFiller();
diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/function/FunctionAllGroupsCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/function/FunctionAllGroupsCollector.java
index c778162..d949bec 100644
--- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/function/FunctionAllGroupsCollector.java
+++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/function/FunctionAllGroupsCollector.java
@@ -20,6 +20,7 @@
 import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.ValueSource;
+import org.apache.lucene.search.LeafCollector;
 import org.apache.lucene.search.grouping.AbstractAllGroupsCollector;
 import org.apache.lucene.util.mutable.MutableValue;
 
@@ -75,7 +76,7 @@
   }
 
   @Override
-  public void setNextReader(AtomicReaderContext context) throws IOException {
+  protected void doSetNextReader(AtomicReaderContext context) throws IOException {
     FunctionValues values = groupBy.getValues(vsContext, context);
     filler = values.getValueFiller();
     mval = filler.getValue();
diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/function/FunctionDistinctValuesCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/function/FunctionDistinctValuesCollector.java
index 3bc7074..597a196 100644
--- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/function/FunctionDistinctValuesCollector.java
+++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/function/FunctionDistinctValuesCollector.java
@@ -20,6 +20,7 @@
 import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.ValueSource;
+import org.apache.lucene.search.LeafCollector;
 import org.apache.lucene.search.grouping.AbstractDistinctValuesCollector;
 import org.apache.lucene.search.grouping.SearchGroup;
 import org.apache.lucene.util.mutable.MutableValue;
@@ -70,7 +71,7 @@
   }
 
   @Override
-  public void setNextReader(AtomicReaderContext context) throws IOException {
+  protected void doSetNextReader(AtomicReaderContext context) throws IOException {
     FunctionValues values = groupSource.getValues(vsContext, context);
     groupFiller = values.getValueFiller();
     groupMval = groupFiller.getValue();
diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/function/FunctionFirstPassGroupingCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/function/FunctionFirstPassGroupingCollector.java
index 6355a51..b9737e1 100644
--- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/function/FunctionFirstPassGroupingCollector.java
+++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/function/FunctionFirstPassGroupingCollector.java
@@ -20,6 +20,7 @@
 import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.ValueSource;
+import org.apache.lucene.search.LeafCollector;
 import org.apache.lucene.search.Sort;
 import org.apache.lucene.search.grouping.AbstractFirstPassGroupingCollector;
 import org.apache.lucene.util.mutable.MutableValue;
@@ -77,8 +78,8 @@
   }
 
   @Override
-  public void setNextReader(AtomicReaderContext readerContext) throws IOException {
-    super.setNextReader(readerContext);
+  protected void doSetNextReader(AtomicReaderContext readerContext) throws IOException {
+    super.doSetNextReader(readerContext);
     FunctionValues values = groupByVS.getValues(vsContext, readerContext);
     filler = values.getValueFiller();
     mval = filler.getValue();
diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/function/FunctionSecondPassGroupingCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/function/FunctionSecondPassGroupingCollector.java
index d2f1d59..9df094b 100644
--- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/function/FunctionSecondPassGroupingCollector.java
+++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/function/FunctionSecondPassGroupingCollector.java
@@ -20,6 +20,7 @@
 import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.ValueSource;
+import org.apache.lucene.search.LeafCollector;
 import org.apache.lucene.search.Sort;
 import org.apache.lucene.search.grouping.AbstractSecondPassGroupingCollector;
 import org.apache.lucene.search.grouping.SearchGroup;
@@ -71,8 +72,8 @@
   }
 
   @Override
-  public void setNextReader(AtomicReaderContext readerContext) throws IOException {
-    super.setNextReader(readerContext);
+  protected void doSetNextReader(AtomicReaderContext readerContext) throws IOException {
+    super.doSetNextReader(readerContext);
     FunctionValues values = groupByVS.getValues(vsContext, readerContext);
     filler = values.getValueFiller();
     mval = filler.getValue();
diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermAllGroupHeadsCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermAllGroupHeadsCollector.java
index 2367f4d..45192c1 100644
--- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermAllGroupHeadsCollector.java
+++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermAllGroupHeadsCollector.java
@@ -19,6 +19,7 @@
 
 import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.SortedDocValues;
+import org.apache.lucene.search.LeafCollector;
 import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.search.FieldComparator;
 import org.apache.lucene.search.Scorer;
@@ -158,7 +159,7 @@
     }
 
     @Override
-    public void setNextReader(AtomicReaderContext context) throws IOException {
+    protected void doSetNextReader(AtomicReaderContext context) throws IOException {
       this.readerContext = context;
       groupIndex = FieldCache.DEFAULT.getTermsIndex(context.reader(), groupField);
 
@@ -273,7 +274,7 @@
     }
 
     @Override
-    public void setNextReader(AtomicReaderContext context) throws IOException {
+    protected void doSetNextReader(AtomicReaderContext context) throws IOException {
       this.readerContext = context;
       groupIndex = FieldCache.DEFAULT.getTermsIndex(context.reader(), groupField);
       for (int i = 0; i < fields.length; i++) {
@@ -441,7 +442,7 @@
     }
 
     @Override
-    public void setNextReader(AtomicReaderContext context) throws IOException {
+    protected void doSetNextReader(AtomicReaderContext context) throws IOException {
       this.readerContext = context;
       groupIndex = FieldCache.DEFAULT.getTermsIndex(context.reader(), groupField);
       for (int i = 0; i < fields.length; i++) {
@@ -584,7 +585,7 @@
     }
 
     @Override
-    public void setNextReader(AtomicReaderContext context) throws IOException {
+    protected void doSetNextReader(AtomicReaderContext context) throws IOException {
       this.readerContext = context;
       groupIndex = FieldCache.DEFAULT.getTermsIndex(context.reader(), groupField);
 
diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermAllGroupsCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermAllGroupsCollector.java
index 5486404..0ff1e57 100644
--- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermAllGroupsCollector.java
+++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermAllGroupsCollector.java
@@ -19,6 +19,7 @@
 
 import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.SortedDocValues;
+import org.apache.lucene.search.LeafCollector;
 import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.search.grouping.AbstractAllGroupsCollector;
 import org.apache.lucene.util.BytesRef;
@@ -103,7 +104,7 @@
   }
 
   @Override
-  public void setNextReader(AtomicReaderContext context) throws IOException {
+  protected void doSetNextReader(AtomicReaderContext context) throws IOException {
     index = FieldCache.DEFAULT.getTermsIndex(context.reader(), groupField);
 
     // Clear ordSet and fill it with previous encountered groups that can occur in the current segment.
diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermDistinctValuesCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermDistinctValuesCollector.java
index 7dad9f0..c718dc2 100644
--- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermDistinctValuesCollector.java
+++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermDistinctValuesCollector.java
@@ -19,6 +19,7 @@
 
 import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.SortedDocValues;
+import org.apache.lucene.search.LeafCollector;
 import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.search.grouping.AbstractDistinctValuesCollector;
 import org.apache.lucene.search.grouping.SearchGroup;
@@ -107,7 +108,7 @@
   }
 
   @Override
-  public void setNextReader(AtomicReaderContext context) throws IOException {
+  protected void doSetNextReader(AtomicReaderContext context) throws IOException {
     groupFieldTermIndex = FieldCache.DEFAULT.getTermsIndex(context.reader(), groupField);
     countFieldTermIndex = FieldCache.DEFAULT.getTermsIndex(context.reader(), countField);
     ordSet.clear();
diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermFirstPassGroupingCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermFirstPassGroupingCollector.java
index 70b71b8..6c708a9 100644
--- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermFirstPassGroupingCollector.java
+++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermFirstPassGroupingCollector.java
@@ -21,6 +21,7 @@
 
 import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.SortedDocValues;
+import org.apache.lucene.search.LeafCollector;
 import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.search.Sort;
 import org.apache.lucene.search.grouping.AbstractFirstPassGroupingCollector;
@@ -85,8 +86,8 @@
   }
 
   @Override
-  public void setNextReader(AtomicReaderContext readerContext) throws IOException {
-    super.setNextReader(readerContext);
+  protected void doSetNextReader(AtomicReaderContext readerContext) throws IOException {
+    super.doSetNextReader(readerContext);
     index = FieldCache.DEFAULT.getTermsIndex(readerContext.reader(), groupField);
   }
 }
diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermGroupFacetCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermGroupFacetCollector.java
index 075214a..5cff4b0 100644
--- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermGroupFacetCollector.java
+++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermGroupFacetCollector.java
@@ -21,6 +21,7 @@
 import org.apache.lucene.index.SortedDocValues;
 import org.apache.lucene.index.SortedSetDocValues;
 import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.search.LeafCollector;
 import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.search.grouping.AbstractGroupFacetCollector;
 import org.apache.lucene.util.BytesRef;
@@ -122,7 +123,7 @@
     }
 
     @Override
-    public void setNextReader(AtomicReaderContext context) throws IOException {
+    protected void doSetNextReader(AtomicReaderContext context) throws IOException {
       if (segmentFacetCounts != null) {
         segmentResults.add(createSegmentResult());
       }
@@ -277,7 +278,7 @@
     }
 
     @Override
-    public void setNextReader(AtomicReaderContext context) throws IOException {
+    protected void doSetNextReader(AtomicReaderContext context) throws IOException {
       if (segmentFacetCounts != null) {
         segmentResults.add(createSegmentResult());
       }
diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermSecondPassGroupingCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermSecondPassGroupingCollector.java
index 9401c86..624b0f7 100644
--- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermSecondPassGroupingCollector.java
+++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermSecondPassGroupingCollector.java
@@ -22,6 +22,7 @@
 
 import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.SortedDocValues;
+import org.apache.lucene.search.LeafCollector;
 import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.search.Sort;
 import org.apache.lucene.search.grouping.AbstractSecondPassGroupingCollector;
@@ -53,8 +54,8 @@
   }
 
   @Override
-  public void setNextReader(AtomicReaderContext readerContext) throws IOException {
-    super.setNextReader(readerContext);
+  protected void doSetNextReader(AtomicReaderContext readerContext) throws IOException {
+    super.doSetNextReader(readerContext);
     index = FieldCache.DEFAULT.getTermsIndex(readerContext.reader(), groupField);
 
     // Rebuild ordSet
diff --git a/lucene/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterPhraseTest.java b/lucene/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterPhraseTest.java
index 2f7d56d..a84ac74 100644
--- a/lucene/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterPhraseTest.java
+++ b/lucene/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterPhraseTest.java
@@ -35,12 +35,13 @@
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.Term;
+import org.apache.lucene.search.LeafCollector;
 import org.apache.lucene.search.Collector;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.PhraseQuery;
 import org.apache.lucene.search.Query;
+import org.apache.lucene.search.SimpleCollector;
 import org.apache.lucene.search.TopDocs;
-
 import org.apache.lucene.search.spans.SpanNearQuery;
 import org.apache.lucene.search.spans.SpanQuery;
 import org.apache.lucene.search.spans.SpanTermQuery;
@@ -116,7 +117,7 @@
           new SpanTermQuery(new Term(FIELD, "fox")),
           new SpanTermQuery(new Term(FIELD, "jumped")) }, 0, true);
       final FixedBitSet bitset = new FixedBitSet(indexReader.maxDoc());
-      indexSearcher.search(phraseQuery, new Collector() {
+      indexSearcher.search(phraseQuery, new SimpleCollector() {
         private int baseDoc;
 
         @Override
@@ -130,7 +131,7 @@
         }
 
         @Override
-        public void setNextReader(AtomicReaderContext context) {
+        protected void doSetNextReader(AtomicReaderContext context) throws IOException {
           this.baseDoc = context.docBase;
         }
 
diff --git a/lucene/join/src/java/org/apache/lucene/search/join/FakeScorer.java b/lucene/join/src/java/org/apache/lucene/search/join/FakeScorer.java
index d4b02dc..cbd1ff8 100644
--- a/lucene/join/src/java/org/apache/lucene/search/join/FakeScorer.java
+++ b/lucene/join/src/java/org/apache/lucene/search/join/FakeScorer.java
@@ -19,11 +19,11 @@
 
 import java.util.Collection;
 
-import org.apache.lucene.search.Collector;
+import org.apache.lucene.search.LeafCollector;
 import org.apache.lucene.search.Scorer;
 import org.apache.lucene.search.Weight;
 
-/** Passed to {@link Collector#setScorer} during join collection. */
+/** Passed to {@link LeafCollector#setScorer} during join collection. */
 final class FakeScorer extends Scorer {
   float score;
   int doc = -1;
diff --git a/lucene/join/src/java/org/apache/lucene/search/join/TermsCollector.java b/lucene/join/src/java/org/apache/lucene/search/join/TermsCollector.java
index 49004b4..56545b5 100644
--- a/lucene/join/src/java/org/apache/lucene/search/join/TermsCollector.java
+++ b/lucene/join/src/java/org/apache/lucene/search/join/TermsCollector.java
@@ -22,9 +22,11 @@
 import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.BinaryDocValues;
 import org.apache.lucene.index.SortedSetDocValues;
+import org.apache.lucene.search.LeafCollector;
 import org.apache.lucene.search.Collector;
 import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.SimpleCollector;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefHash;
 
@@ -33,7 +35,7 @@
  *
  * @lucene.experimental
  */
-abstract class TermsCollector extends Collector {
+abstract class TermsCollector extends SimpleCollector {
 
   final String field;
   final BytesRefHash collectorTerms = new BytesRefHash();
@@ -47,10 +49,6 @@
   }
 
   @Override
-  public void setScorer(Scorer scorer) throws IOException {
-  }
-
-  @Override
   public boolean acceptsDocsOutOfOrder() {
     return true;
   }
@@ -86,7 +84,7 @@
     }
 
     @Override
-    public void setNextReader(AtomicReaderContext context) throws IOException {
+    protected void doSetNextReader(AtomicReaderContext context) throws IOException {
       docTermOrds = FieldCache.DEFAULT.getDocTermOrds(context.reader(), field);
     }
   }
@@ -108,7 +106,7 @@
     }
 
     @Override
-    public void setNextReader(AtomicReaderContext context) throws IOException {
+    protected void doSetNextReader(AtomicReaderContext context) throws IOException {
       fromDocTerms = FieldCache.DEFAULT.getTerms(context.reader(), field, false);
     }
   }
diff --git a/lucene/join/src/java/org/apache/lucene/search/join/TermsIncludingScoreQuery.java b/lucene/join/src/java/org/apache/lucene/search/join/TermsIncludingScoreQuery.java
index 739ef35..220d0e1 100644
--- a/lucene/join/src/java/org/apache/lucene/search/join/TermsIncludingScoreQuery.java
+++ b/lucene/join/src/java/org/apache/lucene/search/join/TermsIncludingScoreQuery.java
@@ -27,6 +27,7 @@
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.search.LeafCollector;
 import org.apache.lucene.search.Collector;
 import org.apache.lucene.search.ComplexExplanation;
 import org.apache.lucene.search.DocIdSetIterator;
@@ -227,7 +228,7 @@
     }
 
     @Override
-    public boolean score(Collector collector, int max) throws IOException {
+    public boolean score(LeafCollector collector, int max) throws IOException {
       FakeScorer fakeScorer = new FakeScorer();
       collector.setScorer(fakeScorer);
       if (doc == -1) {
diff --git a/lucene/join/src/java/org/apache/lucene/search/join/TermsWithScoreCollector.java b/lucene/join/src/java/org/apache/lucene/search/join/TermsWithScoreCollector.java
index dae42b5..c12f2b9 100644
--- a/lucene/join/src/java/org/apache/lucene/search/join/TermsWithScoreCollector.java
+++ b/lucene/join/src/java/org/apache/lucene/search/join/TermsWithScoreCollector.java
@@ -22,14 +22,16 @@
 import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.BinaryDocValues;
 import org.apache.lucene.index.SortedSetDocValues;
+import org.apache.lucene.search.LeafCollector;
 import org.apache.lucene.search.Collector;
 import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.SimpleCollector;
 import org.apache.lucene.util.ArrayUtil;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefHash;
 
-abstract class TermsWithScoreCollector extends Collector {
+abstract class TermsWithScoreCollector extends SimpleCollector {
 
   private final static int INITIAL_ARRAY_SIZE = 256;
 
@@ -128,7 +130,7 @@
     }
 
     @Override
-    public void setNextReader(AtomicReaderContext context) throws IOException {
+    protected void doSetNextReader(AtomicReaderContext context) throws IOException {
       fromDocTerms = FieldCache.DEFAULT.getTerms(context.reader(), field, false);
     }
 
@@ -214,7 +216,7 @@
     }
 
     @Override
-    public void setNextReader(AtomicReaderContext context) throws IOException {
+    protected void doSetNextReader(AtomicReaderContext context) throws IOException {
       fromDocTermOrds = FieldCache.DEFAULT.getDocTermOrds(context.reader(), field);
     }
 
diff --git a/lucene/join/src/java/org/apache/lucene/search/join/ToParentBlockJoinCollector.java b/lucene/join/src/java/org/apache/lucene/search/join/ToParentBlockJoinCollector.java
index 2e3785d..65767fc 100644
--- a/lucene/join/src/java/org/apache/lucene/search/join/ToParentBlockJoinCollector.java
+++ b/lucene/join/src/java/org/apache/lucene/search/join/ToParentBlockJoinCollector.java
@@ -74,7 +74,7 @@
  *
  * @lucene.experimental
  */
-public class ToParentBlockJoinCollector extends Collector {
+public class ToParentBlockJoinCollector extends SimpleCollector {
 
   private final Sort sort;
 
@@ -269,7 +269,7 @@
   }
 
   @Override
-  public void setNextReader(AtomicReaderContext context) throws IOException {
+  protected void doSetNextReader(AtomicReaderContext context) throws IOException {
     currentReaderContext = context;
     docBase = context.docBase;
     for (int compIDX = 0; compIDX < comparators.length; compIDX++) {
@@ -421,7 +421,7 @@
       }
 
       collector.setScorer(fakeScorer);
-      collector.setNextReader(og.readerContext);
+      collector.getLeafCollector(og.readerContext);
       for(int docIDX=0;docIDX<numChildDocs;docIDX++) {
         //System.out.println("docIDX=" + docIDX + " vs " + og.docs[slot].length);
         final int doc = og.docs[slot][docIDX];
diff --git a/lucene/join/src/test/org/apache/lucene/search/join/TestJoinUtil.java b/lucene/join/src/test/org/apache/lucene/search/join/TestJoinUtil.java
index e9f0802..f343e51 100644
--- a/lucene/join/src/test/org/apache/lucene/search/join/TestJoinUtil.java
+++ b/lucene/join/src/test/org/apache/lucene/search/join/TestJoinUtil.java
@@ -47,6 +47,7 @@
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.search.LeafCollector;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.Collector;
@@ -58,6 +59,7 @@
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.SimpleCollector;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.search.TopDocs;
 import org.apache.lucene.search.TopScoreDocCollector;
@@ -222,12 +224,9 @@
     bq.add(joinQuery, BooleanClause.Occur.SHOULD);
     bq.add(new TermQuery(new Term("id", "3")), BooleanClause.Occur.SHOULD);
 
-    indexSearcher.search(bq, new Collector() {
+    indexSearcher.search(bq, new SimpleCollector() {
         boolean sawFive;
         @Override
-        public void setNextReader(AtomicReaderContext context) {
-        }
-        @Override
         public void collect(int docID) {
           // Hairy / evil (depends on how BooleanScorer
           // stores temporarily collected docIDs by
@@ -239,9 +238,6 @@
           }
         }
         @Override
-        public void setScorer(Scorer scorer) {
-        }
-        @Override
         public boolean acceptsDocsOutOfOrder() {
           return true;
         }
@@ -407,7 +403,7 @@
         // Need to know all documents that have matches. TopDocs doesn't give me that and then I'd be also testing TopDocsCollector...
         final FixedBitSet actualResult = new FixedBitSet(indexSearcher.getIndexReader().maxDoc());
         final TopScoreDocCollector topScoreDocCollector = TopScoreDocCollector.create(10, false);
-        indexSearcher.search(joinQuery, new Collector() {
+        indexSearcher.search(joinQuery, new SimpleCollector() {
 
           int docBase;
 
@@ -418,9 +414,9 @@
           }
 
           @Override
-          public void setNextReader(AtomicReaderContext context) {
+          protected void doSetNextReader(AtomicReaderContext context) throws IOException {
             docBase = context.docBase;
-            topScoreDocCollector.setNextReader(context);
+            topScoreDocCollector.getLeafCollector(context);
           }
 
           @Override
@@ -572,7 +568,7 @@
       }
       final Map<BytesRef, JoinScore> joinValueToJoinScores = new HashMap<>();
       if (multipleValuesPerDocument) {
-        fromSearcher.search(new TermQuery(new Term("value", uniqueRandomValue)), new Collector() {
+        fromSearcher.search(new TermQuery(new Term("value", uniqueRandomValue)), new SimpleCollector() {
 
           private Scorer scorer;
           private SortedSetDocValues docTermOrds;
@@ -593,7 +589,7 @@
           }
 
           @Override
-          public void setNextReader(AtomicReaderContext context) throws IOException {
+          protected void doSetNextReader(AtomicReaderContext context) throws IOException {
             docTermOrds = FieldCache.DEFAULT.getDocTermOrds(context.reader(), fromField);
           }
 
@@ -608,7 +604,7 @@
           }
         });
       } else {
-        fromSearcher.search(new TermQuery(new Term("value", uniqueRandomValue)), new Collector() {
+        fromSearcher.search(new TermQuery(new Term("value", uniqueRandomValue)), new SimpleCollector() {
 
           private Scorer scorer;
           private BinaryDocValues terms;
@@ -631,7 +627,7 @@
           }
 
           @Override
-          public void setNextReader(AtomicReaderContext context) throws IOException {
+          protected void doSetNextReader(AtomicReaderContext context) throws IOException {
             terms = FieldCache.DEFAULT.getTerms(context.reader(), fromField, true);
             docsWithField = FieldCache.DEFAULT.getDocsWithField(context.reader(), fromField);
           }
@@ -675,7 +671,7 @@
             }
           }
         } else {
-          toSearcher.search(new MatchAllDocsQuery(), new Collector() {
+          toSearcher.search(new MatchAllDocsQuery(), new SimpleCollector() {
 
             private SortedSetDocValues docTermOrds;
             private final BytesRef scratch = new BytesRef();
@@ -701,7 +697,7 @@
             }
 
             @Override
-            public void setNextReader(AtomicReaderContext context) throws IOException {
+            protected void doSetNextReader(AtomicReaderContext context) throws IOException {
               docBase = context.docBase;
               docTermOrds = FieldCache.DEFAULT.getDocTermOrds(context.reader(), toField);
             }
@@ -713,7 +709,7 @@
           });
         }
       } else {
-        toSearcher.search(new MatchAllDocsQuery(), new Collector() {
+        toSearcher.search(new MatchAllDocsQuery(), new SimpleCollector() {
 
           private BinaryDocValues terms;
           private int docBase;
@@ -730,7 +726,7 @@
           }
 
           @Override
-          public void setNextReader(AtomicReaderContext context) throws IOException {
+          protected void doSetNextReader(AtomicReaderContext context) throws IOException {
             terms = FieldCache.DEFAULT.getTerms(context.reader(), toField, false);
             docBase = context.docBase;
           }
diff --git a/lucene/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java b/lucene/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java
index db79ff0..a768774 100644
--- a/lucene/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java
+++ b/lucene/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java
@@ -54,6 +54,7 @@
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.SimpleCollector;
 import org.apache.lucene.search.similarities.Similarity;
 import org.apache.lucene.store.RAMDirectory; // for javadocs
 import org.apache.lucene.util.ArrayUtil;
@@ -532,7 +533,7 @@
     IndexSearcher searcher = createSearcher();
     try {
       final float[] scores = new float[1]; // inits to 0.0f (no match)
-      searcher.search(query, new Collector() {
+      searcher.search(query, new SimpleCollector() {
         private Scorer scorer;
 
         @Override
@@ -550,8 +551,6 @@
           return true;
         }
 
-        @Override
-        public void setNextReader(AtomicReaderContext context) { }
       });
       float score = scores[0];
       return score;
diff --git a/lucene/misc/src/java/org/apache/lucene/index/sorter/EarlyTerminatingSortingCollector.java b/lucene/misc/src/java/org/apache/lucene/index/sorter/EarlyTerminatingSortingCollector.java
index 23772e1..2571632 100644
--- a/lucene/misc/src/java/org/apache/lucene/index/sorter/EarlyTerminatingSortingCollector.java
+++ b/lucene/misc/src/java/org/apache/lucene/index/sorter/EarlyTerminatingSortingCollector.java
@@ -21,9 +21,11 @@
 
 import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.search.LeafCollector;
 import org.apache.lucene.search.CollectionTerminatedException;
 import org.apache.lucene.search.Collector;
-import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.FilterLeafCollector;
+import org.apache.lucene.search.FilterCollector;
 import org.apache.lucene.search.Sort;
 import org.apache.lucene.search.TopDocsCollector;
 import org.apache.lucene.search.TotalHitCountCollector;
@@ -32,11 +34,11 @@
  * A {@link Collector} that early terminates collection of documents on a
  * per-segment basis, if the segment was sorted according to the given
  * {@link Sort}.
- * 
+ *
  * <p>
  * <b>NOTE:</b> the {@code Collector} detects sorted segments according to
  * {@link SortingMergePolicy}, so it's best used in conjunction with it. Also,
- * it collects up to a specified {@code numDocsToCollect} from each segment, 
+ * it collects up to a specified {@code numDocsToCollect} from each segment,
  * and therefore is mostly suitable for use in conjunction with collectors such as
  * {@link TopDocsCollector}, and not e.g. {@link TotalHitCountCollector}.
  * <p>
@@ -58,26 +60,21 @@
  * the old and the new {@code Sort}s have the same identifier, this
  * {@code Collector} will incorrectly detect sorted segments.</li>
  * </ul>
- * 
+ *
  * @lucene.experimental
  */
-public class EarlyTerminatingSortingCollector extends Collector {
-  /** The wrapped Collector */
-  protected final Collector in;
+public class EarlyTerminatingSortingCollector extends FilterCollector {
+
   /** Sort used to sort the search results */
   protected final Sort sort;
   /** Number of documents to collect in each segment */
   protected final int numDocsToCollect;
-  /** Number of documents to collect in the current segment being processed */
-  protected int segmentTotalCollect;
-  /** True if the current segment being processed is sorted by {@link #sort} */
-  protected boolean segmentSorted;
 
   private int numCollected;
 
   /**
    * Create a new {@link EarlyTerminatingSortingCollector} instance.
-   * 
+   *
    * @param in
    *          the collector to wrap
    * @param sort
@@ -88,38 +85,37 @@
    *          hits.
    */
   public EarlyTerminatingSortingCollector(Collector in, Sort sort, int numDocsToCollect) {
+    super(in);
     if (numDocsToCollect <= 0) {
-      throw new IllegalStateException("numDocsToCollect must always be > 0, got " + segmentTotalCollect);
+      throw new IllegalStateException("numDocsToCollect must always be > 0, got " + numDocsToCollect);
     }
-    this.in = in;
     this.sort = sort;
     this.numDocsToCollect = numDocsToCollect;
   }
 
   @Override
-  public void setScorer(Scorer scorer) throws IOException {
-    in.setScorer(scorer);
-  }
+  public LeafCollector getLeafCollector(AtomicReaderContext context) throws IOException {
+    if (SortingMergePolicy.isSorted(context.reader(), sort)) {
+      // segment is sorted, can early-terminate
+      return new FilterLeafCollector(super.getLeafCollector(context)) {
 
-  @Override
-  public void collect(int doc) throws IOException {
-    in.collect(doc);
-    if (++numCollected >= segmentTotalCollect) {
-      throw new CollectionTerminatedException();
+        @Override
+        public void collect(int doc) throws IOException {
+          super.collect(doc);
+          if (++numCollected >= numDocsToCollect) {
+            throw new CollectionTerminatedException();
+          }
+        }
+
+        @Override
+        public boolean acceptsDocsOutOfOrder() {
+          return false;
+        }
+
+      };
+    } else {
+      return super.getLeafCollector(context);
     }
   }
 
-  @Override
-  public void setNextReader(AtomicReaderContext context) throws IOException {
-    in.setNextReader(context);
-    segmentSorted = SortingMergePolicy.isSorted(context.reader(), sort);
-    segmentTotalCollect = segmentSorted ? numDocsToCollect : Integer.MAX_VALUE;
-    numCollected = 0;
-  }
-
-  @Override
-  public boolean acceptsDocsOutOfOrder() {
-    return !segmentSorted && in.acceptsDocsOutOfOrder();
-  }
-
 }
diff --git a/lucene/misc/src/test/org/apache/lucene/index/sorter/TestEarlyTermination.java b/lucene/misc/src/test/org/apache/lucene/index/sorter/TestEarlyTermination.java
index f64f56d..716cc20 100644
--- a/lucene/misc/src/test/org/apache/lucene/index/sorter/TestEarlyTermination.java
+++ b/lucene/misc/src/test/org/apache/lucene/index/sorter/TestEarlyTermination.java
@@ -34,6 +34,7 @@
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
+import org.apache.lucene.search.LeafCollector;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.Sort;
@@ -147,9 +148,10 @@
       Sort different = new Sort(new SortField("ndv2", SortField.Type.LONG));
       searcher.search(query, new EarlyTerminatingSortingCollector(collector2, different, numHits) {
         @Override
-        public void setNextReader(AtomicReaderContext context) throws IOException {
-          super.setNextReader(context);
-          assertFalse("segment should not be recognized as sorted as different sorter was used", segmentSorted);
+        public LeafCollector getLeafCollector(AtomicReaderContext context) throws IOException {
+          final LeafCollector ret = super.getLeafCollector(context);
+          assertTrue("segment should not be recognized as sorted as different sorter was used", ret.getClass() == in.getLeafCollector(context).getClass());
+          return ret;
         }
       });
     }
diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/BooleanQueryTst.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/BooleanQueryTst.java
index 35a402d..4c6349b 100644
--- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/BooleanQueryTst.java
+++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/BooleanQueryTst.java
@@ -22,13 +22,13 @@
 import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.search.LeafCollector;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Collector;
 import org.apache.lucene.search.Scorer;
 import org.apache.lucene.search.Query;
-
+import org.apache.lucene.search.SimpleCollector;
 import org.apache.lucene.queryparser.surround.parser.QueryParser;
-
 import org.junit.Assert;
 
 public class BooleanQueryTst {
@@ -57,7 +57,7 @@
   
   public void setVerbose(boolean verbose) {this.verbose = verbose;}
 
-  class TestCollector extends Collector { // FIXME: use check hits from Lucene tests
+  class TestCollector extends SimpleCollector { // FIXME: use check hits from Lucene tests
     int totalMatched;
     boolean[] encountered;
     private Scorer scorer = null;
@@ -79,7 +79,7 @@
     }
 
     @Override
-    public void setNextReader(AtomicReaderContext context) throws IOException {
+    protected void doSetNextReader(AtomicReaderContext context) throws IOException {
       docBase = context.docBase;
     }
     
diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/AbstractVisitingPrefixTreeFilter.java b/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/AbstractVisitingPrefixTreeFilter.java
index 627e854..124362b 100644
--- a/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/AbstractVisitingPrefixTreeFilter.java
+++ b/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/AbstractVisitingPrefixTreeFilter.java
@@ -25,7 +25,6 @@
 import org.apache.lucene.spatial.prefix.tree.SpatialPrefixTree;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.StringHelper;
 
 import java.io.IOException;
 import java.util.Iterator;
@@ -117,7 +116,7 @@
     protected final boolean hasIndexedLeaves;//if false then we can skip looking for them
 
     private VNode curVNode;//current pointer, derived from query shape
-    private BytesRef curVNodeTerm = new BytesRef();//curVNode.cell's term.
+    private BytesRef curVNodeTerm = new BytesRef();//curVNode.cell's term, without leaf
     private Cell scanCell;
 
     private BytesRef thisTerm;//the result of termsEnum.term()
@@ -171,8 +170,7 @@
         }
 
         //Seek to curVNode's cell (or skip if termsEnum has moved beyond)
-        curVNodeTerm.bytes = curVNode.cell.getTokenBytes();
-        curVNodeTerm.length = curVNodeTerm.bytes.length;
+        curVNode.cell.getTokenBytesNoLeaf(curVNodeTerm);
         int compare = thisTerm.compareTo(curVNodeTerm);
         if (compare > 0) {
           // leap frog (termsEnum is beyond where we would otherwise seek)
@@ -215,7 +213,7 @@
       if (hasIndexedLeaves && cell.getLevel() != 0) {
         //If the next indexed term just adds a leaf marker ('+') to cell,
         // then add all of those docs
-        assert StringHelper.startsWith(thisTerm, curVNodeTerm);//TODO refactor to use method on curVNode.cell
+        assert curVNode.cell.isWithin(curVNodeTerm, thisTerm);
         scanCell = grid.getCell(thisTerm.bytes, thisTerm.offset, thisTerm.length, scanCell);
         if (scanCell.getLevel() == cell.getLevel() && scanCell.isLeaf()) {
           visitLeaf(scanCell);
@@ -265,7 +263,7 @@
      */
     protected void scan(int scanDetailLevel) throws IOException {
       for (;
-           thisTerm != null && StringHelper.startsWith(thisTerm, curVNodeTerm);//TODO refactor to use method on curVNode.cell
+           thisTerm != null && curVNode.cell.isWithin(curVNodeTerm, thisTerm);
            thisTerm = termsEnum.next()) {
         scanCell = grid.getCell(thisTerm.bytes, thisTerm.offset, thisTerm.length, scanCell);
 
diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/CellTokenStream.java b/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/CellTokenStream.java
new file mode 100644
index 0000000..426b922
--- /dev/null
+++ b/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/CellTokenStream.java
@@ -0,0 +1,183 @@
+package org.apache.lucene.spatial.prefix;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
+import org.apache.lucene.spatial.prefix.tree.Cell;
+import org.apache.lucene.util.Attribute;
+import org.apache.lucene.util.AttributeImpl;
+import org.apache.lucene.util.AttributeReflector;
+import org.apache.lucene.util.AttributeSource;
+import org.apache.lucene.util.BytesRef;
+
+import java.io.IOException;
+import java.util.Iterator;
+
+/**
+ * A TokenStream used internally by {@link org.apache.lucene.spatial.prefix.PrefixTreeStrategy}.
+ *
+ * This is highly modelled after {@link org.apache.lucene.analysis.NumericTokenStream}.
+ *
+ * If there is demand for it to be public, it could be made so.
+ *
+ * @lucene.internal
+ */
+class CellTokenStream extends TokenStream {
+
+  private interface CellTermAttribute extends Attribute {
+    Cell getCell();
+    void setCell(Cell cell);
+
+    //TODO one day deprecate this once we have better encodings
+    boolean getOmitLeafByte();
+    void setOmitLeafByte(boolean b);
+  }
+
+  // just a wrapper to prevent adding CTA
+  private static final class CellAttributeFactory extends AttributeSource.AttributeFactory {
+    private final AttributeSource.AttributeFactory delegate;
+
+    CellAttributeFactory(AttributeSource.AttributeFactory delegate) {
+      this.delegate = delegate;
+    }
+
+    @Override
+    public AttributeImpl createAttributeInstance(Class<? extends Attribute> attClass) {
+      if (CharTermAttribute.class.isAssignableFrom(attClass))
+        throw new IllegalArgumentException("CellTokenStream does not support CharTermAttribute.");
+      return delegate.createAttributeInstance(attClass);
+    }
+  }
+
+  private static final class CellTermAttributeImpl extends AttributeImpl
+      implements CellTermAttribute, TermToBytesRefAttribute {
+    private BytesRef bytes = new BytesRef();
+    private Cell cell;
+    private boolean omitLeafByte;//false by default (whether there's a leaf byte or not)
+
+    @Override
+    public Cell getCell() {
+      return cell;
+    }
+
+    @Override
+    public boolean getOmitLeafByte() {
+      return omitLeafByte;
+    }
+
+    @Override
+    public void setCell(Cell cell) {
+      this.cell = cell;
+      omitLeafByte = false;//reset
+    }
+
+    @Override
+    public void setOmitLeafByte(boolean b) {
+      omitLeafByte = b;
+    }
+
+    @Override
+    public void clear() {
+      // this attribute has no contents to clear!
+      // we keep it untouched as it's fully controlled by outer class.
+    }
+
+    @Override
+    public void copyTo(AttributeImpl target) {
+      final CellTermAttribute a = (CellTermAttribute) target;
+      a.setCell(cell);
+      a.setOmitLeafByte(omitLeafByte);
+    }
+
+    @Override
+    public int fillBytesRef() {
+      if (omitLeafByte)
+        cell.getTokenBytesNoLeaf(bytes);
+      else
+        cell.getTokenBytes(bytes);
+      return bytes.hashCode();
+    }
+
+    @Override
+    public BytesRef getBytesRef() {
+      return bytes;
+    }
+
+    @Override
+    public void reflectWith(AttributeReflector reflector) {
+      fillBytesRef();
+      reflector.reflect(TermToBytesRefAttribute.class, "bytes", BytesRef.deepCopyOf(bytes));
+    }
+  }
+
+  public CellTokenStream() {
+    this(AttributeFactory.DEFAULT_ATTRIBUTE_FACTORY);
+  }
+
+  public CellTokenStream(AttributeFactory factory) {
+    super(new CellAttributeFactory(factory));
+  }
+
+  public CellTokenStream setCells(Iterator<Cell> iter) {
+    this.iter = iter;
+    return this;
+  }
+
+  @Override
+  public void reset() throws IOException {
+    if (iter == null)
+      throw new IllegalStateException("call setCells() before usage");
+    cellAtt.setCell(null);
+    cellAtt.setOmitLeafByte(false);
+  }
+
+  /** Outputs the token of a cell, and if it's a leaf, outputs it again with the leaf byte. */
+  @Override
+  public final boolean incrementToken() {
+    if (iter == null)
+      throw new IllegalStateException("call setCells() before usage");
+
+    // this will only clear all other attributes in this TokenStream
+    clearAttributes();
+
+    if (cellAtt.getOmitLeafByte()) {
+      cellAtt.setOmitLeafByte(false);
+      return true;
+    }
+    //get next
+    if (!iter.hasNext())
+      return false;
+    cellAtt.setCell(iter.next());
+    if (cellAtt.getCell().isLeaf())
+      cellAtt.setOmitLeafByte(true);
+    return true;
+  }
+
+  {
+    addAttributeImpl(new CellTermAttributeImpl());//because non-public constructor
+  }
+  //members
+  private final CellTermAttribute cellAtt = addAttribute(CellTermAttribute.class);
+
+  //TODO support position increment, and TypeAttribute
+
+  private Iterator<Cell> iter = null; // null means not initialized
+
+}
diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/ContainsPrefixTreeFilter.java b/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/ContainsPrefixTreeFilter.java
index e9ee786..e0188e6 100644
--- a/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/ContainsPrefixTreeFilter.java
+++ b/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/ContainsPrefixTreeFilter.java
@@ -83,7 +83,7 @@
       super(context, acceptDocs);
     }
 
-    BytesRef termBytes = new BytesRef();
+    BytesRef termBytes = new BytesRef();//no leaf
     Cell nextCell;//see getLeafDocs
 
     /** This is the primary algorithm; recursive.  Returns null if finds none. */
@@ -130,16 +130,15 @@
     }
 
     private boolean seekExact(Cell cell) throws IOException {
-      assert new BytesRef(cell.getTokenBytes()).compareTo(termBytes) > 0;
-      termBytes.bytes = cell.getTokenBytes();
-      termBytes.length = termBytes.bytes.length;
+      assert cell.getTokenBytesNoLeaf(null).compareTo(termBytes) > 0;
+      cell.getTokenBytesNoLeaf(termBytes);
       if (termsEnum == null)
         return false;
       return termsEnum.seekExact(termBytes);
     }
 
     private SmallDocSet getDocs(Cell cell, Bits acceptContains) throws IOException {
-      assert new BytesRef(cell.getTokenBytes()).equals(termBytes);
+      assert cell.getTokenBytesNoLeaf(null).equals(termBytes);
 
       return collectDocs(acceptContains);
     }
@@ -147,7 +146,7 @@
     private Cell lastLeaf = null;//just for assertion
 
     private SmallDocSet getLeafDocs(Cell leafCell, Bits acceptContains) throws IOException {
-      assert new BytesRef(leafCell.getTokenBytes()).equals(termBytes);
+      assert leafCell.getTokenBytesNoLeaf(null).equals(termBytes);
       assert ! leafCell.equals(lastLeaf);//don't call for same leaf again
       lastLeaf = leafCell;
 
diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/PointPrefixTreeFieldCacheProvider.java b/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/PointPrefixTreeFieldCacheProvider.java
index 0f3abf4..0cc5b9a 100644
--- a/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/PointPrefixTreeFieldCacheProvider.java
+++ b/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/PointPrefixTreeFieldCacheProvider.java
@@ -46,7 +46,7 @@
   protected Point readShape(BytesRef term) {
     scanCell = grid.getCell(term.bytes, term.offset, term.length, scanCell);
     if (scanCell.getLevel() == grid.getMaxLevels() && !scanCell.isLeaf())
-      return scanCell.getCenter();
+      return scanCell.getShape().getCenter();
     return null;
   }
 }
diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/PrefixTreeStrategy.java b/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/PrefixTreeStrategy.java
index 31b9a85..4a43aad 100644
--- a/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/PrefixTreeStrategy.java
+++ b/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/PrefixTreeStrategy.java
@@ -19,8 +19,6 @@
 
 import com.spatial4j.core.shape.Point;
 import com.spatial4j.core.shape.Shape;
-import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.FieldType;
 import org.apache.lucene.index.FieldInfo;
@@ -31,7 +29,6 @@
 import org.apache.lucene.spatial.query.SpatialArgs;
 import org.apache.lucene.spatial.util.ShapeFieldCacheDistanceValueSource;
 
-import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
@@ -125,13 +122,12 @@
 
   public Field[] createIndexableFields(Shape shape, double distErr) {
     int detailLevel = grid.getLevelForDistance(distErr);
+    // note: maybe CellTokenStream should do this line, but it doesn't matter and it would create extra
+    // coupling
+    List<Cell> cells = grid.getCells(shape, detailLevel, true, simplifyIndexedCells);//intermediate cells
 
-    //TODO is CellTokenStream supposed to be re-used somehow? see Uwe's comments:
-    //  http://code.google.com/p/lucene-spatial-playground/issues/detail?id=4
-
     Field field = new Field(getFieldName(),
-        new CellTokenStream(cells.iterator()), FIELD_TYPE);
+        new CellTokenStream().setCells(cells.iterator()), FIELD_TYPE);
     return new Field[]{field};
   }
 
@@ -146,41 +142,6 @@
     FIELD_TYPE.freeze();
   }
 
-  /** Outputs the tokenString of a cell, and if its a leaf, outputs it again with the leaf byte. */
-  final static class CellTokenStream extends TokenStream {
-
-    private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
-
-    private Iterator<Cell> iter = null;
-
-    public CellTokenStream(Iterator<Cell> tokens) {
-      this.iter = tokens;
-    }
-
-    CharSequence nextTokenStringNeedingLeaf = null;
-
-    @Override
-    public boolean incrementToken() {
-      clearAttributes();
-      if (nextTokenStringNeedingLeaf != null) {
-        termAtt.append(nextTokenStringNeedingLeaf);
-        termAtt.append((char) Cell.LEAF_BYTE);
-        nextTokenStringNeedingLeaf = null;
-        return true;
-      }
-      if (iter.hasNext()) {
-        Cell cell = iter.next();
-        CharSequence token = cell.getTokenString();
-        termAtt.append(token);
-        if (cell.isLeaf())
-          nextTokenStringNeedingLeaf = token;
-        return true;
-      }
-      return false;
-    }
-
-  }
-
   @Override
   public ValueSource makeDistanceValueSource(Point queryPoint, double multiplier) {
     PointPrefixTreeFieldCacheProvider p = provider.get( getFieldName() );
diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/TermQueryPrefixTreeStrategy.java b/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/TermQueryPrefixTreeStrategy.java
index f9fb22c..f4e9462 100644
--- a/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/TermQueryPrefixTreeStrategy.java
+++ b/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/TermQueryPrefixTreeStrategy.java
@@ -61,7 +61,7 @@
     BytesRef[] terms = new BytesRef[cells.size()];
     int i = 0;
     for (Cell cell : cells) {
-      terms[i++] = new BytesRef(cell.getTokenString());//TODO use cell.getTokenBytes()
+      terms[i++] = cell.getTokenBytesNoLeaf(null);
     }
     return new TermsFilter(getFieldName(), terms);
   }
diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/tree/Cell.java b/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/tree/Cell.java
index 592f815..a6630bc 100644
--- a/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/tree/Cell.java
+++ b/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/tree/Cell.java
@@ -20,6 +20,8 @@
 import com.spatial4j.core.shape.Point;
 import com.spatial4j.core.shape.Shape;
 import com.spatial4j.core.shape.SpatialRelation;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.StringHelper;
 
 import java.util.ArrayList;
 import java.util.Collection;
@@ -27,74 +29,45 @@
 import java.util.List;
 
 /**
- * Represents a grid cell. These are not necessarily thread-safe, although new
- * Cell("") (world cell) must be.
+ * Represents a grid cell. These are not necessarily thread-safe, although calling {@link #getShape()} will
+ * sufficiently prepare it to be so, if needed.
  *
  * @lucene.experimental
  */
-public abstract class Cell implements Comparable<Cell> {
-  public static final byte LEAF_BYTE = '+';//NOTE: must sort before letters & numbers
+public abstract class Cell {
 
-  /*
-  Holds a byte[] and/or String representation of the cell. Both are lazy constructed from the other.
-  Neither contains the trailing leaf byte.
-   */
+  private static final byte LEAF_BYTE = '+';//NOTE: must sort before letters & numbers
+
+  //Arguably we could simply use a BytesRef, using an extra Object.
   private byte[] bytes;
   private int b_off;
   private int b_len;
 
-  private String token;//this is the only part of equality
-
   /**
    * When set via getSubCells(filter), it is the relationship between this cell
-   * and the given shape filter.
+   * and the given shape filter. Doesn't participate in shape equality.
    */
   protected SpatialRelation shapeRel;
 
-  /**
-   * Always false for points. Otherwise, indicate no further sub-cells are going
-   * to be provided because shapeRel is WITHIN or maxLevels or a detailLevel is
-   * hit.
-   */
-  protected boolean leaf;
-
-  protected Cell(String token) {
-    this.token = token;
-    if (token.length() > 0 && token.charAt(token.length() - 1) == (char) LEAF_BYTE) {
-      this.token = token.substring(0, token.length() - 1);
-      setLeaf();
-    }
-
-    if (getLevel() == 0)
-      getShape();//ensure any lazy instantiation completes to make this threadsafe
-  }
-
+  /** Warning: Refers to the same bytes (no copy). If {@link #setLeaf()} is subsequently called then it
+   * may modify bytes. */
   protected Cell(byte[] bytes, int off, int len) {
     this.bytes = bytes;
     this.b_off = off;
     this.b_len = len;
-    b_fixLeaf();
   }
 
+  /** Warning: Refers to the same bytes (no copy). If {@link #setLeaf()} is subsequently called then it
+   * may modify bytes. */
   public void reset(byte[] bytes, int off, int len) {
     assert getLevel() != 0;
-    token = null;
     shapeRel = null;
     this.bytes = bytes;
     this.b_off = off;
     this.b_len = len;
-    b_fixLeaf();
   }
 
-  private void b_fixLeaf() {
-    //note that non-point shapes always have the maxLevels cell set with setLeaf
-    if (bytes[b_off + b_len - 1] == LEAF_BYTE) {
-      b_len--;
-      setLeaf();
-    } else {
-      leaf = false;
-    }
-  }
+  protected abstract SpatialPrefixTree getGrid();
 
   public SpatialRelation getShapeRel() {
     return shapeRel;
@@ -105,47 +78,68 @@
    * further cells with this prefix for the shape (always true at maxLevels).
    */
   public boolean isLeaf() {
-    return leaf;
+    return (b_len > 0 && bytes[b_off + b_len - 1] == LEAF_BYTE);
   }
 
-  /** Note: not supported at level 0. */
+  /** Modifies the bytes to reflect that this is a leaf. Warning: never invoke from a cell
+   * initialized to reference the same bytes from termsEnum, which should be treated as immutable.
+   * Note: not supported at level 0. */
   public void setLeaf() {
     assert getLevel() != 0;
-    leaf = true;
-  }
-
-  /**
-   * Note: doesn't contain a trailing leaf byte.
-   */
-  public String getTokenString() {
-    if (token == null) {
-      token = new String(bytes, b_off, b_len, SpatialPrefixTree.UTF8);
-    }
-    return token;
-  }
-
-  /**
-   * Note: doesn't contain a trailing leaf byte.
-   */
-  public byte[] getTokenBytes() {
-    if (bytes != null) {
-      if (b_off != 0 || b_len != bytes.length) {
-        throw new IllegalStateException("Not supported if byte[] needs to be recreated.");
-      }
-    } else {
-      bytes = token.getBytes(SpatialPrefixTree.UTF8);
+    if (isLeaf())
+      return;
+    //if isn't big enough, we have to copy
+    if (bytes.length < b_off + b_len) {
+      //hopefully this copying doesn't happen too much (DWS: I checked and it doesn't seem to happen)
+      byte[] copy = new byte[b_len + 1];
+      System.arraycopy(bytes, b_off, copy, 0, b_len);
+      copy[b_len++] = LEAF_BYTE;
+      bytes = copy;
       b_off = 0;
-      b_len = bytes.length;
+    } else {
+      bytes[b_off + b_len++] = LEAF_BYTE;
     }
-    return bytes;
   }
 
+  /**
+   * Returns the bytes for this cell.
+   * The result param is used to save object allocation, though its bytes aren't used.
+   * @param result where the result goes, or null to create new
+   */
+  public BytesRef getTokenBytes(BytesRef result) {
+    if (result == null)
+      result = new BytesRef();
+    result.bytes = bytes;
+    result.offset = b_off;
+    result.length = b_len;
+    return result;
+  }
+
+  /**
+   * Returns the bytes for this cell, without leaf set. The bytes should sort before any
+   * cells that have the leaf set for the spatial location.
+   * The result param is used to save object allocation, though its bytes aren't used.
+   * @param result where the result goes, or null to create new
+   */
+  public BytesRef getTokenBytesNoLeaf(BytesRef result) {
+    result = getTokenBytes(result);
+    if (isLeaf())
+      result.length--;
+    return result;
+  }
+
+  /** Level 0 is the world (and has no parent), from then on a higher level means a smaller
+   * cell than the level before it.
+   */
   public int getLevel() {
-    return token != null ? token.length() : b_len;
+    return isLeaf() ? b_len - 1 : b_len;
   }
 
-  //TODO add getParent() and update some algorithms to use this?
-  //public Cell getParent();
+  /** Gets the parent cell that contains this one. Don't call on the world cell. */
+  public Cell getParent() {
+    assert getLevel() > 0;
+    return getGrid().getCell(bytes, b_off, b_len - (isLeaf() ? 2 : 1));
+  }
 
   /**
    * Like {@link #getSubCells()} but with the results filtered by a shape. If
@@ -196,8 +190,6 @@
    */
   public abstract Cell getSubCell(Point p);
 
-  //TODO Cell getSubCell(byte b)
-
   /**
    * Gets the cells at the next grid cell level that cover this cell.
    * Precondition: Never called when getLevel() == maxLevel.
@@ -211,30 +203,45 @@
    */
   public abstract int getSubCellsSize();
 
+  /** Gets the shape for this cell; typically a Rectangle. This method also serves to trigger any lazy
+   * loading needed to make the cell instance thread-safe.
+   */
   public abstract Shape getShape();
 
+  /** TODO remove once no longer used. */
   public Point getCenter() {
     return getShape().getCenter();
   }
 
   @Override
-  public int compareTo(Cell o) {
-    return getTokenString().compareTo(o.getTokenString());
-  }
-
-  @Override
   public boolean equals(Object obj) {
-    return !(obj == null || !(obj instanceof Cell)) && getTokenString().equals(((Cell) obj).getTokenString());
+    //this method isn't "normally" called; just in asserts/tests
+    if (obj instanceof Cell) {
+      Cell cell = (Cell) obj;
+      return getTokenBytes(null).equals(cell.getTokenBytes(null));
+    } else {
+      return false;
+    }
   }
 
   @Override
   public int hashCode() {
-    return getTokenString().hashCode();
+    return getTokenBytesNoLeaf(null).hashCode();
   }
 
   @Override
   public String toString() {
-    return getTokenString() + (isLeaf() ? (char) LEAF_BYTE : "");
+    //this method isn't "normally" called; just in asserts/tests
+    return getTokenBytes(null).utf8ToString();
   }
 
+  /**
+   * Returns if the target term is within/underneath this cell; not necessarily a direct descendant.
+   * @param bytesNoLeaf must be the result of {@link #getTokenBytesNoLeaf(BytesRef)} for this cell
+   * @param term the term
+   */
+  public boolean isWithin(BytesRef bytesNoLeaf, BytesRef term) {
+    assert bytesNoLeaf.equals(getTokenBytesNoLeaf(null));
+    return StringHelper.startsWith(term, bytesNoLeaf);
+  }
 }
diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/tree/GeohashPrefixTree.java b/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/tree/GeohashPrefixTree.java
index 5f2ca6d..e656bec 100644
--- a/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/tree/GeohashPrefixTree.java
+++ b/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/tree/GeohashPrefixTree.java
@@ -84,18 +84,29 @@
   }
 
   @Override
-  public Cell getCell(String token) {
-    return new GhCell(token);
-  }
-
-  @Override
   public Cell getCell(byte[] bytes, int offset, int len) {
     return new GhCell(bytes, offset, len);
   }
 
+  private static byte[] stringToBytesPlus1(String token) {
+    //copy ASCII token to byte array with one extra spot for eventual LEAF_BYTE if needed
+    byte[] bytes = new byte[token.length() + 1];
+    for (int i = 0; i < token.length(); i++) {
+      bytes[i] = (byte) token.charAt(i);
+    }
+    return bytes;
+  }
+
   class GhCell extends Cell {
-    GhCell(String token) {
-      super(token);
+
+    private Shape shape;//cache
+    private String geohash;//cache; never has leaf byte, simply a geohash
+
+    GhCell(String geohash) {
+      super(stringToBytesPlus1(geohash), 0, geohash.length());
+      this.geohash = geohash;
+      if (isLeaf())
+        this.geohash = geohash.substring(0, geohash.length() - 1);
     }
 
     GhCell(byte[] bytes, int off, int len) {
@@ -103,8 +114,12 @@
     }
 
     @Override
+    protected SpatialPrefixTree getGrid() { return GeohashPrefixTree.this; }
+
+    @Override
     public void reset(byte[] bytes, int off, int len) {
       super.reset(bytes, off, len);
+      geohash = null;
       shape = null;
     }
 
@@ -125,26 +140,26 @@
 
     @Override
     public Cell getSubCell(Point p) {
-      return GeohashPrefixTree.this.getCell(p, getLevel() + 1);//not performant!
+      return getGrid().getCell(p, getLevel() + 1);//not performant!
     }
 
-    private Shape shape;//cache
-
     @Override
     public Shape getShape() {
       if (shape == null) {
-        shape = GeohashUtils.decodeBoundary(getGeohash(), ctx);
+        shape = GeohashUtils.decodeBoundary(getGeohash(), getGrid().getSpatialContext());
       }
       return shape;
     }
 
     @Override
     public Point getCenter() {
-      return GeohashUtils.decode(getGeohash(), ctx);
+      return GeohashUtils.decode(getGeohash(), getGrid().getSpatialContext());
     }
 
     private String getGeohash() {
-      return getTokenString();
+      if (geohash == null)
+        geohash = getTokenBytesNoLeaf(null).utf8ToString();
+      return geohash;
     }
 
   }//class GhCell
diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/tree/QuadPrefixTree.java b/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/tree/QuadPrefixTree.java
index d2e16a1..8437dc9 100644
--- a/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/tree/QuadPrefixTree.java
+++ b/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/tree/QuadPrefixTree.java
@@ -22,6 +22,7 @@
 import com.spatial4j.core.shape.Rectangle;
 import com.spatial4j.core.shape.Shape;
 import com.spatial4j.core.shape.SpatialRelation;
+import org.apache.lucene.util.BytesRef;
 
 import java.io.PrintStream;
 import java.text.NumberFormat;
@@ -142,16 +143,11 @@
   @Override
   public Cell getCell(Point p, int level) {
     List<Cell> cells = new ArrayList<>(1);
-    build(xmid, ymid, 0, cells, new StringBuilder(), ctx.makePoint(p.getX(),p.getY()), level);
+    build(xmid, ymid, 0, cells, new BytesRef(maxLevels+1), ctx.makePoint(p.getX(),p.getY()), level);
     return cells.get(0);//note cells could be longer if p on edge
   }
 
   @Override
-  public Cell getCell(String token) {
-    return new QuadCell(token);
-  }
-
-  @Override
   public Cell getCell(byte[] bytes, int offset, int len) {
     return new QuadCell(bytes, offset, len);
   }
@@ -161,10 +157,10 @@
       double y,
       int level,
       List<Cell> matches,
-      StringBuilder str,
+      BytesRef str,
       Shape shape,
       int maxLevel) {
-    assert str.length() == level;
+    assert str.length == level;
     double w = levelW[level] / 2;
     double h = levelH[level] / 2;
 
@@ -187,51 +183,51 @@
       double cy,
       int level,
       List<Cell> matches,
-      StringBuilder str,
+      BytesRef str,
       Shape shape,
       int maxLevel) {
-    assert str.length() == level;
+    assert str.length == level;
+    assert str.offset == 0;
     double w = levelW[level] / 2;
     double h = levelH[level] / 2;
 
-    int strlen = str.length();
+    int strlen = str.length;
     Rectangle rectangle = ctx.makeRectangle(cx - w, cx + w, cy - h, cy + h);
     SpatialRelation v = shape.relate(rectangle);
     if (SpatialRelation.CONTAINS == v) {
-      str.append(c);
+      str.bytes[str.length++] = (byte)c;//append
       //str.append(SpatialPrefixGrid.COVER);
-      matches.add(new QuadCell(str.toString(),v.transpose()));
+      matches.add(new QuadCell(BytesRef.deepCopyOf(str), v.transpose()));
     } else if (SpatialRelation.DISJOINT == v) {
       // nothing
     } else { // SpatialRelation.WITHIN, SpatialRelation.INTERSECTS
-      str.append(c);
+      str.bytes[str.length++] = (byte)c;//append
 
       int nextLevel = level+1;
       if (nextLevel >= maxLevel) {
         //str.append(SpatialPrefixGrid.INTERSECTS);
-        matches.add(new QuadCell(str.toString(),v.transpose()));
+        matches.add(new QuadCell(BytesRef.deepCopyOf(str), v.transpose()));
       } else {
         build(cx, cy, nextLevel, matches, str, shape, maxLevel);
       }
     }
-    str.setLength(strlen);
+    str.length = strlen;
   }
 
-  class QuadCell extends Cell {
-
-    public QuadCell(String token) {
-      super(token);
-    }
-
-    public QuadCell(String token, SpatialRelation shapeRel) {
-      super(token);
-      this.shapeRel = shapeRel;
-    }
+  class QuadCell extends Cell{
 
     QuadCell(byte[] bytes, int off, int len) {
       super(bytes, off, len);
     }
 
+    QuadCell(BytesRef str, SpatialRelation shapeRel) {
+      this(str.bytes, str.offset, str.length);
+      this.shapeRel = shapeRel;
+    }
+
+    @Override
+    protected SpatialPrefixTree getGrid() { return QuadPrefixTree.this; }
+
     @Override
     public void reset(byte[] bytes, int off, int len) {
       super.reset(bytes, off, len);
@@ -240,14 +236,26 @@
 
     @Override
     public Collection<Cell> getSubCells() {
+      BytesRef source = getTokenBytesNoLeaf(null);
+      BytesRef target = new BytesRef();
+
       List<Cell> cells = new ArrayList<>(4);
-      cells.add(new QuadCell(getTokenString()+"A"));
-      cells.add(new QuadCell(getTokenString()+"B"));
-      cells.add(new QuadCell(getTokenString()+"C"));
-      cells.add(new QuadCell(getTokenString()+"D"));
+      cells.add(new QuadCell(concat(source, (byte)'A', target), null));
+      cells.add(new QuadCell(concat(source, (byte)'B', target), null));
+      cells.add(new QuadCell(concat(source, (byte)'C', target), null));
+      cells.add(new QuadCell(concat(source, (byte)'D', target), null));
       return cells;
     }
 
+    private BytesRef concat(BytesRef source, byte b, BytesRef target) {
+      assert target.offset == 0;
+      target.bytes = new byte[source.length + 2];//+2 for new char + potential leaf
+      target.length = 0;
+      target.copyBytes(source);
+      target.bytes[target.length++] = b;
+      return target;
+    }
+
     @Override
     public int getSubCellsSize() {
       return 4;
@@ -268,27 +276,30 @@
     }
 
     private Rectangle makeShape() {
-      String token = getTokenString();
+      BytesRef token = getTokenBytesNoLeaf(null);
       double xmin = QuadPrefixTree.this.xmin;
       double ymin = QuadPrefixTree.this.ymin;
 
-      for (int i = 0; i < token.length(); i++) {
-        char c = token.charAt(i);
-        if ('A' == c || 'a' == c) {
-          ymin += levelH[i];
-        } else if ('B' == c || 'b' == c) {
-          xmin += levelW[i];
-          ymin += levelH[i];
-        } else if ('C' == c || 'c' == c) {
-          // nothing really
-        }
-        else if('D' == c || 'd' == c) {
-          xmin += levelW[i];
-        } else {
-          throw new RuntimeException("unexpected char: " + c);
+      for (int i = 0; i < token.length; i++) {
+        byte c = token.bytes[token.offset + i];
+        switch (c) {
+          case 'A':
+            ymin += levelH[i];
+            break;
+          case 'B':
+            xmin += levelW[i];
+            ymin += levelH[i];
+            break;
+          case 'C':
+            break;//nothing really
+          case 'D':
+            xmin += levelW[i];
+            break;
+          default:
+            throw new RuntimeException("unexpected char: " + c);
         }
       }
-      int len = token.length();
+      int len = token.length;
       double width, height;
       if (len > 0) {
         width = levelW[len-1];
diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/tree/SpatialPrefixTree.java b/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/tree/SpatialPrefixTree.java
index 64c8147..80bd54c 100644
--- a/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/tree/SpatialPrefixTree.java
+++ b/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/tree/SpatialPrefixTree.java
@@ -21,10 +21,12 @@
 import com.spatial4j.core.shape.Point;
 import com.spatial4j.core.shape.Rectangle;
 import com.spatial4j.core.shape.Shape;
+import org.apache.lucene.util.BytesRef;
 
 import java.nio.charset.Charset;
 import java.nio.charset.StandardCharsets;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.List;
@@ -103,14 +105,14 @@
   private transient Cell worldCell;//cached
 
   /**
-   * Returns the level 0 cell which encompasses all spatial data. Equivalent to {@link #getCell(String)} with "".
-   * This cell is threadsafe, just like a spatial prefix grid is, although cells aren't
-   * generally threadsafe.
-   * TODO rename to getTopCell or is this fine?
+   * Returns the level 0 cell which encompasses all spatial data. Equivalent to {@link #getCell(byte[], int, int)} with
+   * no bytes. This cell is thread-safe, just like a spatial prefix grid is, although cells aren't
+   * generally thread-safe.
    */
-  public Cell getWorldCell() {
+  public Cell getWorldCell() {//another possible name: getTopCell
     if (worldCell == null) {
-      worldCell = getCell("");
+      worldCell = getCell(BytesRef.EMPTY_BYTES, 0, 0);
+      worldCell.getShape();//lazy load; make thread-safe
     }
     return worldCell;
   }
@@ -119,8 +121,6 @@
    * The cell for the specified token. The empty string should be equal to {@link #getWorldCell()}.
    * Precondition: Never called when token length > maxLevel.
    */
-  public abstract Cell getCell(String token);
-
   public abstract Cell getCell(byte[] bytes, int offset, int len);
 
   public final Cell getCell(byte[] bytes, int offset, int len, Cell target) {
@@ -215,40 +215,23 @@
    * A Point-optimized implementation of
    * {@link #getCells(com.spatial4j.core.shape.Shape, int, boolean, boolean)}. That
    * method in facts calls this for points.
-   * <p/>
-   * This implementation depends on {@link #getCell(String)} being fast, as its
-   * called repeatedly when incPlarents is true.
    */
   public List<Cell> getCells(Point p, int detailLevel, boolean inclParents) {
     Cell cell = getCell(p, detailLevel);
-    if (!inclParents) {
+    assert !cell.isLeaf();
+    if (!inclParents || detailLevel == 1) {
       return Collections.singletonList(cell);
     }
 
-    String endToken = cell.getTokenString();
-    assert endToken.length() == detailLevel;
-    List<Cell> cells = new ArrayList<>(detailLevel);
-    for (int i = 1; i < detailLevel; i++) {
-      cells.add(getCell(endToken.substring(0, i)));//TODO refactor: add a cell.getParent()
+    //fill in reverse order to be sorted
+    Cell[] cells = new Cell[detailLevel];
+    for (int i = detailLevel-1; true; i--) {
+      cells[i] = cell;
+      if (i == 0)
+        break;
+      cell = cell.getParent();
     }
-    cells.add(cell);
-    return cells;
+    return Arrays.asList(cells);
   }
 
-  /**
-   * Will add the trailing leaf byte for leaves. This isn't particularly efficient.
-   * @deprecated TODO remove; not used and not interesting, don't need collection in & out
-   */
-  public static List<String> cellsToTokenStrings(Collection<Cell> cells) {
-    List<String> tokens = new ArrayList<>((cells.size()));
-    for (Cell cell : cells) {
-      final String token = cell.getTokenString();
-      if (cell.isLeaf()) {
-        tokens.add(token + (char) Cell.LEAF_BYTE);
-      } else {
-        tokens.add(token);
-      }
-    }
-    return tokens;
-  }
 }
diff --git a/lucene/spatial/src/test/org/apache/lucene/spatial/prefix/tree/SpatialPrefixTreeTest.java b/lucene/spatial/src/test/org/apache/lucene/spatial/prefix/tree/SpatialPrefixTreeTest.java
index e15fd1a..82f3d31 100644
--- a/lucene/spatial/src/test/org/apache/lucene/spatial/prefix/tree/SpatialPrefixTreeTest.java
+++ b/lucene/spatial/src/test/org/apache/lucene/spatial/prefix/tree/SpatialPrefixTreeTest.java
@@ -35,6 +35,9 @@
 import org.junit.Before;
 import org.junit.Test;
 
+import java.util.ArrayList;
+import java.util.List;
+
 public class SpatialPrefixTreeTest extends SpatialTestCase {
 
   //TODO plug in others and test them
@@ -56,9 +59,10 @@
     Cell c = trie.getWorldCell();
     assertEquals(0, c.getLevel());
     assertEquals(ctx.getWorldBounds(), c.getShape());
-    while(c.getLevel() < trie.getMaxLevels()) {
+    while (c.getLevel() < trie.getMaxLevels()) {
       prevC = c;
-      c = c.getSubCells().iterator().next();//TODO random which one?
+      List<Cell> subCells = new ArrayList<>(c.getSubCells());
+      c = subCells.get(random().nextInt(subCells.size()));
       
       assertEquals(prevC.getLevel()+1,c.getLevel());
       Rectangle prevNShape = (Rectangle) prevC.getShape();
diff --git a/lucene/test-framework/src/java/org/apache/lucene/search/AssertingBulkOutOfOrderScorer.java b/lucene/test-framework/src/java/org/apache/lucene/search/AssertingBulkOutOfOrderScorer.java
index 0b2fa34..26a0a4f 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/search/AssertingBulkOutOfOrderScorer.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/search/AssertingBulkOutOfOrderScorer.java
@@ -18,93 +18,37 @@
  */
 
 import java.io.IOException;
-import java.lang.ref.WeakReference;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Map;
 import java.util.Random;
-import java.util.WeakHashMap;
 
-import org.apache.lucene.index.DocsEnum;
-import org.apache.lucene.util.VirtualMethod;
-
-/** A crazy {@link BulkScorer} that wraps a {@link Scorer}
+/** A crazy {@link BulkScorer} that wraps another {@link BulkScorer}
  *  but shuffles the order of the collected documents. */
 public class AssertingBulkOutOfOrderScorer extends BulkScorer {
 
+  final BulkScorer in;
   final Random random;
-  final Scorer scorer;
 
-  public AssertingBulkOutOfOrderScorer(Random random, Scorer scorer) {
+  public AssertingBulkOutOfOrderScorer(Random random, BulkScorer in) {
+    this.in = in;
     this.random = random;
-    this.scorer = scorer;
-  }
-
-  private void shuffle(int[] docIDs, float[] scores, int[] freqs, int size) {
-    for (int i = size - 1; i > 0; --i) {
-      final int other = random.nextInt(i + 1);
-
-      final int tmpDoc = docIDs[i];
-      docIDs[i] = docIDs[other];
-      docIDs[other] = tmpDoc;
-
-      final float tmpScore = scores[i];
-      scores[i] = scores[other];
-      scores[other] = tmpScore;
-      
-      final int tmpFreq = freqs[i];
-      freqs[i] = freqs[other];
-      freqs[other] = tmpFreq;
-    }
-  }
-
-  private static void flush(int[] docIDs, float[] scores, int[] freqs, int size,
-      FakeScorer scorer, Collector collector) throws IOException {
-    for (int i = 0; i < size; ++i) {
-      scorer.doc = docIDs[i];
-      scorer.freq = freqs[i];
-      scorer.score = scores[i];
-      collector.collect(scorer.doc);
-    }
   }
 
   @Override
-  public boolean score(Collector collector, int max) throws IOException {
-    if (scorer.docID() == -1) {
-      scorer.nextDoc();
-    }
+  public boolean score(LeafCollector collector, int max) throws IOException {
+    final RandomOrderCollector randomCollector = new RandomOrderCollector(random, collector);
+    final boolean remaining = in.score(randomCollector, max);
+    randomCollector.flush();
+    return remaining;
+  }
 
-    FakeScorer fake = new FakeScorer();
-    collector.setScorer(fake);
-
-    final int bufferSize = 1 + random.nextInt(100);
-    final int[] docIDs = new int[bufferSize];
-    final float[] scores = new float[bufferSize];
-    final int[] freqs = new int[bufferSize];
-
-    int buffered = 0;
-    int doc = scorer.docID();
-    while (doc < max) {
-      docIDs[buffered] = doc;
-      scores[buffered] = scorer.score();
-      freqs[buffered] = scorer.freq();
-
-      if (++buffered == bufferSize) {
-        shuffle(docIDs, scores, freqs, buffered);
-        flush(docIDs, scores, freqs, buffered, fake, collector);
-        buffered = 0;
-      }
-      doc = scorer.nextDoc();
-    }
-
-    shuffle(docIDs, scores, freqs, buffered);
-    flush(docIDs, scores, freqs, buffered, fake, collector);
-
-    return doc != Scorer.NO_MORE_DOCS;
+  @Override
+  public void score(LeafCollector collector) throws IOException {
+    final RandomOrderCollector randomCollector = new RandomOrderCollector(random, collector);
+    in.score(randomCollector);
+    randomCollector.flush();
   }
 
   @Override
   public String toString() {
-    return "AssertingBulkOutOfOrderScorer(" + scorer + ")";
+    return "AssertingBulkOutOfOrderScorer(" + in + ")";
   }
 }
diff --git a/lucene/test-framework/src/java/org/apache/lucene/search/AssertingBulkScorer.java b/lucene/test-framework/src/java/org/apache/lucene/search/AssertingBulkScorer.java
index 995f49a..50114aa 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/search/AssertingBulkScorer.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/search/AssertingBulkScorer.java
@@ -31,8 +31,8 @@
 /** Wraps a Scorer with additional checks */
 public class AssertingBulkScorer extends BulkScorer {
 
-  private static final VirtualMethod<BulkScorer> SCORE_COLLECTOR = new VirtualMethod<BulkScorer>(BulkScorer.class, "score", Collector.class);
-  private static final VirtualMethod<BulkScorer> SCORE_COLLECTOR_RANGE = new VirtualMethod<BulkScorer>(BulkScorer.class, "score", Collector.class, int.class);
+  private static final VirtualMethod<BulkScorer> SCORE_COLLECTOR = new VirtualMethod<BulkScorer>(BulkScorer.class, "score", LeafCollector.class);
+  private static final VirtualMethod<BulkScorer> SCORE_COLLECTOR_RANGE = new VirtualMethod<BulkScorer>(BulkScorer.class, "score", LeafCollector.class, int.class);
 
   public static BulkScorer wrap(Random random, BulkScorer other) {
     if (other == null || other instanceof AssertingBulkScorer) {
@@ -58,7 +58,7 @@
   }
 
   @Override
-  public void score(Collector collector) throws IOException {
+  public void score(LeafCollector collector) throws IOException {
     if (random.nextBoolean()) {
       try {
         final boolean remaining = in.score(collector, DocsEnum.NO_MORE_DOCS);
@@ -72,7 +72,7 @@
   }
 
   @Override
-  public boolean score(Collector collector, int max) throws IOException {
+  public boolean score(LeafCollector collector, int max) throws IOException {
     return in.score(collector, max);
   }
 
diff --git a/lucene/test-framework/src/java/org/apache/lucene/search/AssertingCollector.java b/lucene/test-framework/src/java/org/apache/lucene/search/AssertingCollector.java
index 8ab2926..7aa8a2e 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/search/AssertingCollector.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/search/AssertingCollector.java
@@ -25,46 +25,42 @@
 /** Wraps another Collector and checks that
  *  acceptsDocsOutOfOrder is respected. */
 
-public class AssertingCollector extends Collector {
+public class AssertingCollector extends FilterCollector {
 
   public static Collector wrap(Random random, Collector other, boolean inOrder) {
     return other instanceof AssertingCollector ? other : new AssertingCollector(random, other, inOrder);
   }
 
   final Random random;
-  final Collector in;
   final boolean inOrder;
-  int lastCollected;
 
   AssertingCollector(Random random, Collector in, boolean inOrder) {
+    super(in);
     this.random = random;
-    this.in = in;
     this.inOrder = inOrder;
-    lastCollected = -1;
   }
 
   @Override
-  public void setScorer(Scorer scorer) throws IOException {
-    in.setScorer(AssertingScorer.getAssertingScorer(random, scorer));
-  }
+  public LeafCollector getLeafCollector(AtomicReaderContext context) throws IOException {
+    return new FilterLeafCollector(super.getLeafCollector(context)) {
 
-  @Override
-  public void collect(int doc) throws IOException {
-    if (inOrder || !acceptsDocsOutOfOrder()) {
-      assert doc > lastCollected : "Out of order : " + lastCollected + " " + doc;
-    }
-    in.collect(doc);
-    lastCollected = doc;
-  }
+      int lastCollected = -1;
 
-  @Override
-  public void setNextReader(AtomicReaderContext context) throws IOException {
-    lastCollected = -1;
-  }
+      @Override
+      public void setScorer(Scorer scorer) throws IOException {
+        super.setScorer(AssertingScorer.getAssertingScorer(random, scorer));
+      }
 
-  @Override
-  public boolean acceptsDocsOutOfOrder() {
-    return in.acceptsDocsOutOfOrder();
+      @Override
+      public void collect(int doc) throws IOException {
+        if (inOrder || !acceptsDocsOutOfOrder()) {
+          assert doc > lastCollected : "Out of order : " + lastCollected + " " + doc;
+        }
+        in.collect(doc);
+        lastCollected = doc;
+      }
+
+    };
   }
 
 }
diff --git a/lucene/test-framework/src/java/org/apache/lucene/search/AssertingWeight.java b/lucene/test-framework/src/java/org/apache/lucene/search/AssertingWeight.java
index 793b396..b075247 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/search/AssertingWeight.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/search/AssertingWeight.java
@@ -79,22 +79,20 @@
     if (AssertingBulkScorer.shouldWrap(inScorer)) {
       // The incoming scorer already has a specialized
       // implementation for BulkScorer, so we should use it:
-      return AssertingBulkScorer.wrap(new Random(random.nextLong()), inScorer);
-    } else if (scoreDocsInOrder == false && random.nextBoolean()) {
+      inScorer = AssertingBulkScorer.wrap(new Random(random.nextLong()), inScorer);
+    } else if (random.nextBoolean()) {
+      // Let super wrap this.scorer instead, so we use
+      // AssertingScorer:
+      inScorer = super.bulkScorer(context, scoreDocsInOrder, acceptDocs);
+    }
+
+    if (scoreDocsInOrder == false && random.nextBoolean()) {
       // The caller claims it can handle out-of-order
       // docs; let's confirm that by pulling docs and
       // randomly shuffling them before collection:
-      //Scorer scorer = in.scorer(context, acceptDocs);
-      Scorer scorer = scorer(context, acceptDocs);
-
-      // Scorer should not be null if bulkScorer wasn't:
-      assert scorer != null;
-      return new AssertingBulkOutOfOrderScorer(new Random(random.nextLong()), scorer);
-    } else {
-      // Let super wrap this.scorer instead, so we use
-      // AssertingScorer:
-      return super.bulkScorer(context, scoreDocsInOrder, acceptDocs);
+      inScorer = new AssertingBulkOutOfOrderScorer(new Random(random.nextLong()), inScorer);
     }
+    return inScorer;
   }
 
   @Override
diff --git a/lucene/test-framework/src/java/org/apache/lucene/search/CheckHits.java b/lucene/test-framework/src/java/org/apache/lucene/search/CheckHits.java
index 034396c..042ad9b 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/search/CheckHits.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/search/CheckHits.java
@@ -123,7 +123,7 @@
   /**
    * Just collects document ids into a set.
    */
-  public static class SetCollector extends Collector {
+  public static class SetCollector extends SimpleCollector {
     final Set<Integer> bag;
     public SetCollector(Set<Integer> bag) {
       this.bag = bag;
@@ -136,7 +136,7 @@
       bag.add(Integer.valueOf(doc + base));
     }
     @Override
-    public void setNextReader(AtomicReaderContext context) {
+    protected void doSetNextReader(AtomicReaderContext context) throws IOException {
       base = context.docBase;
     }
     @Override
@@ -464,7 +464,7 @@
    *
    * @see CheckHits#verifyExplanation
    */
-  public static class ExplanationAsserter extends Collector {
+  public static class ExplanationAsserter extends SimpleCollector {
 
     Query q;
     IndexSearcher s;
@@ -508,7 +508,7 @@
                         exp.isMatch());
     }
     @Override
-    public void setNextReader(AtomicReaderContext context) {
+    protected void doSetNextReader(AtomicReaderContext context) throws IOException {
       base = context.docBase;
     }
     @Override
diff --git a/lucene/test-framework/src/java/org/apache/lucene/search/QueryUtils.java b/lucene/test-framework/src/java/org/apache/lucene/search/QueryUtils.java
index cd00f74..8656ef2 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/search/QueryUtils.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/search/QueryUtils.java
@@ -249,7 +249,7 @@
         final float maxDiff = 1e-5f;
         final AtomicReader lastReader[] = {null};
 
-        s.search(q, new Collector() {
+        s.search(q, new SimpleCollector() {
           private Scorer sc;
           private Scorer scorer;
           private int leafPtr;
@@ -305,7 +305,7 @@
           }
 
           @Override
-          public void setNextReader(AtomicReaderContext context) throws IOException {
+          protected void doSetNextReader(AtomicReaderContext context) throws IOException {
             // confirm that skipping beyond the last doc, on the
             // previous reader, hits NO_MORE_DOCS
             if (lastReader[0] != null) {
@@ -357,7 +357,7 @@
     final int lastDoc[] = {-1};
     final AtomicReader lastReader[] = {null};
     final List<AtomicReaderContext> context = s.getTopReaderContext().leaves();
-    s.search(q,new Collector() {
+    s.search(q,new SimpleCollector() {
       private Scorer scorer;
       private int leafPtr;
       private Bits liveDocs;
@@ -392,7 +392,7 @@
       }
 
       @Override
-      public void setNextReader(AtomicReaderContext context) throws IOException {
+      protected void doSetNextReader(AtomicReaderContext context) throws IOException {
         // confirm that skipping beyond the last doc, on the
         // previous reader, hits NO_MORE_DOCS
         if (lastReader[0] != null) {
diff --git a/lucene/test-framework/src/java/org/apache/lucene/search/RandomOrderCollector.java b/lucene/test-framework/src/java/org/apache/lucene/search/RandomOrderCollector.java
new file mode 100644
index 0000000..c91835b
--- /dev/null
+++ b/lucene/test-framework/src/java/org/apache/lucene/search/RandomOrderCollector.java
@@ -0,0 +1,106 @@
+package org.apache.lucene.search;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.Random;
+
+/** Randomize collection order. Don't forget to call {@link #flush()} when
+ *  collection is finished to collect buffered documents. */
+final class RandomOrderCollector extends FilterLeafCollector {
+
+  final Random random;
+  Scorer scorer;
+  FakeScorer fakeScorer;
+
+  int buffered;
+  final int bufferSize;
+  final int[] docIDs;
+  final float[] scores;
+  final int[] freqs;
+
+  RandomOrderCollector(Random random, LeafCollector in) {
+    super(in);
+    if (!in.acceptsDocsOutOfOrder()) {
+      throw new IllegalArgumentException();
+    }
+    this.random = random;
+    bufferSize = 1 + random.nextInt(100);
+    docIDs = new int[bufferSize];
+    scores = new float[bufferSize];
+    freqs = new int[bufferSize];
+    buffered = 0;
+  }
+
+  @Override
+  public void setScorer(Scorer scorer) throws IOException {
+    this.scorer = scorer;
+    fakeScorer = new FakeScorer();
+    in.setScorer(fakeScorer);
+  }
+
+  private void shuffle() {
+    for (int i = buffered - 1; i > 0; --i) {
+      final int other = random.nextInt(i + 1);
+
+      final int tmpDoc = docIDs[i];
+      docIDs[i] = docIDs[other];
+      docIDs[other] = tmpDoc;
+
+      final float tmpScore = scores[i];
+      scores[i] = scores[other];
+      scores[other] = tmpScore;
+
+      final int tmpFreq = freqs[i];
+      freqs[i] = freqs[other];
+      freqs[other] = tmpFreq;
+    }
+  }
+
+  public void flush() throws IOException {
+    shuffle();
+    for (int i = 0; i < buffered; ++i) {
+      fakeScorer.doc = docIDs[i];
+      fakeScorer.freq = freqs[i];
+      fakeScorer.score = scores[i];
+      in.collect(fakeScorer.doc);
+    }
+    buffered = 0;
+  }
+
+  @Override
+  public void collect(int doc) throws IOException {
+    docIDs[buffered] = doc;
+    scores[buffered] = scorer.score();
+    try {
+      freqs[buffered] = scorer.freq();
+    } catch (UnsupportedOperationException e) {
+      freqs[buffered] = -1;
+    }
+    if (++buffered == bufferSize) {
+      flush();
+    }
+  }
+
+  @Override
+  public boolean acceptsDocsOutOfOrder() {
+    return in.acceptsDocsOutOfOrder();
+  }
+
+}
+
diff --git a/lucene/test-framework/src/java/org/apache/lucene/store/MockDirectoryWrapper.java b/lucene/test-framework/src/java/org/apache/lucene/store/MockDirectoryWrapper.java
index 142ab38..25ae6b3 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/store/MockDirectoryWrapper.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/store/MockDirectoryWrapper.java
@@ -622,7 +622,8 @@
     return size;
   }
 
-  private boolean assertNoUnreferencedFilesOnClose = true;
+  // NOTE: This is off by default; see LUCENE-5574
+  private boolean assertNoUnreferencedFilesOnClose;
 
   public void setAssertNoUnrefencedFilesOnClose(boolean v) {
     assertNoUnreferencedFilesOnClose = v;
diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java
index a9d23c7..160470f 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java
@@ -872,28 +872,13 @@
       int maxNumThreadStates = rarely(r) ? TestUtil.nextInt(r, 5, 20) // crazy value
           : TestUtil.nextInt(r, 1, 4); // reasonable value
 
-      Method setIndexerThreadPoolMethod = null;
-      try {
-        // Retrieve the package-private setIndexerThreadPool
-        // method:
-        for(Method m : IndexWriterConfig.class.getDeclaredMethods()) {
-          if (m.getName().equals("setIndexerThreadPool")) {
-            m.setAccessible(true);
-            setIndexerThreadPoolMethod = m;
-            break;
-          }
-        }
-      } catch (Exception e) {
-        // Should not happen?
-        throw new RuntimeException(e);
-      }
-
-      if (setIndexerThreadPoolMethod == null) {
-        throw new RuntimeException("failed to lookup IndexWriterConfig.setIndexerThreadPool method");
-      }
-
       try {
         if (rarely(r)) {
+          // Retrieve the package-private setIndexerThreadPool
+          // method:
+          Method setIndexerThreadPoolMethod = IndexWriterConfig.class.getDeclaredMethod("setIndexerThreadPool",
+            Class.forName("org.apache.lucene.index.DocumentsWriterPerThreadPool"));
+          setIndexerThreadPoolMethod.setAccessible(true);
           Class<?> clazz = Class.forName("org.apache.lucene.index.RandomDocumentsWriterPerThreadPool");
           Constructor<?> ctor = clazz.getConstructor(int.class, Random.class);
           ctor.setAccessible(true);
@@ -904,7 +889,7 @@
           c.setMaxThreadStates(maxNumThreadStates);
         }
       } catch (Exception e) {
-        throw new RuntimeException(e);
+        Rethrow.rethrow(e);
       }
     }
 
@@ -1097,7 +1082,8 @@
       }
       return wrapped;
     } catch (Exception e) {
-      throw new RuntimeException(e);
+      Rethrow.rethrow(e);
+      throw null; // dummy to prevent compiler failure
     }
   }
 
@@ -1278,7 +1264,8 @@
       // try empty ctor
       return clazz.newInstance();
     } catch (Exception e) {
-      throw new RuntimeException(e);
+      Rethrow.rethrow(e);
+      throw null; // dummy to prevent compiler failure
     }
   }
   
@@ -1400,20 +1387,30 @@
   public static IndexSearcher newSearcher(IndexReader r) {
     return newSearcher(r, true);
   }
-  
+
+  /**
+   * Create a new searcher over the reader. This searcher might randomly use
+   * threads.
+   */
+  public static IndexSearcher newSearcher(IndexReader r, boolean maybeWrap) {
+    return newSearcher(r, maybeWrap, true);
+  }
+
   /**
    * Create a new searcher over the reader. This searcher might randomly use
    * threads. if <code>maybeWrap</code> is true, this searcher might wrap the
-   * reader with one that returns null for getSequentialSubReaders.
+   * reader with one that returns null for getSequentialSubReaders. If
+   * <code>wrapWithAssertions</code> is true, this searcher might be an
+   * {@link AssertingIndexSearcher} instance.
    */
-  public static IndexSearcher newSearcher(IndexReader r, boolean maybeWrap) {
+  public static IndexSearcher newSearcher(IndexReader r, boolean maybeWrap, boolean wrapWithAssertions) {
     Random random = random();
     if (usually()) {
       if (maybeWrap) {
         try {
           r = maybeWrapReader(r);
         } catch (IOException e) {
-          throw new AssertionError(e);
+          Rethrow.rethrow(e);
         }
       }
       // TODO: this whole check is a coverage hack, we should move it to tests for various filterreaders.
@@ -1424,10 +1421,15 @@
         try {
           TestUtil.checkReader(r);
         } catch (IOException e) {
-          throw new AssertionError(e);
+          Rethrow.rethrow(e);
         }
       }
-      IndexSearcher ret = random.nextBoolean() ? new AssertingIndexSearcher(random, r) : new AssertingIndexSearcher(random, r.getContext());
+      final IndexSearcher ret;
+      if (wrapWithAssertions) {
+        ret = random.nextBoolean() ? new AssertingIndexSearcher(random, r) : new AssertingIndexSearcher(random, r.getContext());
+      } else {
+        ret = random.nextBoolean() ? new IndexSearcher(r) : new IndexSearcher(r.getContext());
+      }
       ret.setSimilarity(classEnvRule.similarity);
       return ret;
     } else {
@@ -1454,9 +1456,16 @@
          }
        });
       }
-      IndexSearcher ret = random.nextBoolean() 
-          ? new AssertingIndexSearcher(random, r, ex)
-          : new AssertingIndexSearcher(random, r.getContext(), ex);
+      IndexSearcher ret;
+      if (wrapWithAssertions) {
+        ret = random.nextBoolean()
+            ? new AssertingIndexSearcher(random, r, ex)
+            : new AssertingIndexSearcher(random, r.getContext(), ex);
+      } else {
+        ret = random.nextBoolean()
+            ? new IndexSearcher(r, ex)
+            : new IndexSearcher(r.getContext(), ex);
+      }
       ret.setSimilarity(classEnvRule.similarity);
       return ret;
     }
diff --git a/solr/core/src/java/org/apache/solr/analytics/accumulator/BasicAccumulator.java b/solr/core/src/java/org/apache/solr/analytics/accumulator/BasicAccumulator.java
index 304c0a2..fdcf66b 100644
--- a/solr/core/src/java/org/apache/solr/analytics/accumulator/BasicAccumulator.java
+++ b/solr/core/src/java/org/apache/solr/analytics/accumulator/BasicAccumulator.java
@@ -80,7 +80,7 @@
   }
   
   @Override
-  public void setNextReader(AtomicReaderContext context) throws IOException {
+  protected void doSetNextReader(AtomicReaderContext context) throws IOException {
     this.context = context;
     for (StatsCollector counter : statsCollectors) {
       counter.setNextReader(context);
diff --git a/solr/core/src/java/org/apache/solr/analytics/accumulator/FacetingAccumulator.java b/solr/core/src/java/org/apache/solr/analytics/accumulator/FacetingAccumulator.java
index 61ed6e1..fb6d81d 100644
--- a/solr/core/src/java/org/apache/solr/analytics/accumulator/FacetingAccumulator.java
+++ b/solr/core/src/java/org/apache/solr/analytics/accumulator/FacetingAccumulator.java
@@ -155,8 +155,8 @@
    * @throws IOException if there is an error setting the next reader
    */
   @Override
-  public void setNextReader(AtomicReaderContext context) throws IOException {
-    super.setNextReader(context);
+  protected void doSetNextReader(AtomicReaderContext context) throws IOException {
+    super.doSetNextReader(context);
     for( Map<String,StatsCollector[]> valueList : fieldFacetCollectors.values() ){
       for (StatsCollector[] statsCollectorList : valueList.values()) {
         for (StatsCollector statsCollector : statsCollectorList) {
@@ -165,7 +165,7 @@
       }
     }
     for (FieldFacetAccumulator fa : facetAccumulators) {
-      fa.setNextReader(context);
+      fa.getLeafCollector(context);
     }
   }
   
@@ -175,7 +175,7 @@
    * @throws IOException if there is an error setting the next reader
    */
   public void setRangeStatsCollectorReaders(AtomicReaderContext context) throws IOException {
-    super.setNextReader(context);
+    super.getLeafCollector(context);
     for( Map<String,StatsCollector[]> rangeList : rangeFacetCollectors.values() ){
       for (StatsCollector[] statsCollectorList : rangeList.values()) {
         for (StatsCollector statsCollector : statsCollectorList) {
@@ -192,7 +192,7 @@
    * @throws IOException if there is an error setting the next reader
    */
   public void setQueryStatsCollectorReaders(AtomicReaderContext context) throws IOException {
-    super.setNextReader(context);
+    super.getLeafCollector(context);
     for( Map<String,StatsCollector[]> queryList : queryFacetCollectors.values() ){
       for (StatsCollector[] statsCollectorList : queryList.values()) {
         for (StatsCollector statsCollector : statsCollectorList) {
diff --git a/solr/core/src/java/org/apache/solr/analytics/accumulator/ValueAccumulator.java b/solr/core/src/java/org/apache/solr/analytics/accumulator/ValueAccumulator.java
index ecc74ef..90b8713 100644
--- a/solr/core/src/java/org/apache/solr/analytics/accumulator/ValueAccumulator.java
+++ b/solr/core/src/java/org/apache/solr/analytics/accumulator/ValueAccumulator.java
@@ -20,20 +20,14 @@
 import java.io.IOException;
 
 import org.apache.lucene.index.AtomicReaderContext;
-import org.apache.lucene.search.Collector;
-import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.LeafCollector;
+import org.apache.lucene.search.SimpleCollector;
 import org.apache.solr.common.util.NamedList;
 
 /**
  * Abstract Collector that manages all StatsCollectors, Expressions and Facets.
  */
-public abstract class ValueAccumulator extends Collector {
-
-  /**
-   * @param context The context to read documents from.
-   * @throws IOException if setting next reader fails
-   */
-  public abstract void setNextReader(AtomicReaderContext context) throws IOException;
+public abstract class ValueAccumulator extends SimpleCollector {
   
   /**
    * Finalizes the statistics within each StatsCollector.
@@ -51,9 +45,4 @@
     return true;
   }
 
-  @Override
-  public void setScorer(Scorer scorer) throws IOException {
-    // NOP
-  }
-  
 }
diff --git a/solr/core/src/java/org/apache/solr/analytics/accumulator/facet/FieldFacetAccumulator.java b/solr/core/src/java/org/apache/solr/analytics/accumulator/facet/FieldFacetAccumulator.java
index 9376909..e2cf416 100644
--- a/solr/core/src/java/org/apache/solr/analytics/accumulator/facet/FieldFacetAccumulator.java
+++ b/solr/core/src/java/org/apache/solr/analytics/accumulator/facet/FieldFacetAccumulator.java
@@ -82,7 +82,7 @@
    * Move to the next set of documents to add to the field facet.
    */
   @Override
-  public void setNextReader(AtomicReaderContext context) throws IOException { 
+  protected void doSetNextReader(AtomicReaderContext context) throws IOException {
     if (multiValued) {
       setValues = context.reader().getSortedSetDocValues(name);
     } else {
diff --git a/solr/core/src/java/org/apache/solr/analytics/accumulator/facet/QueryFacetAccumulator.java b/solr/core/src/java/org/apache/solr/analytics/accumulator/facet/QueryFacetAccumulator.java
index f0d6b4a..3a268ee 100644
--- a/solr/core/src/java/org/apache/solr/analytics/accumulator/facet/QueryFacetAccumulator.java
+++ b/solr/core/src/java/org/apache/solr/analytics/accumulator/facet/QueryFacetAccumulator.java
@@ -51,7 +51,7 @@
    * Update the readers of the queryFacet {@link StatsCollector}s in FacetingAccumulator
    */
   @Override
-  public void setNextReader(AtomicReaderContext context) throws IOException {
+  protected void doSetNextReader(AtomicReaderContext context) throws IOException {
     parent.setQueryStatsCollectorReaders(context);
   }
 
diff --git a/solr/core/src/java/org/apache/solr/analytics/accumulator/facet/RangeFacetAccumulator.java b/solr/core/src/java/org/apache/solr/analytics/accumulator/facet/RangeFacetAccumulator.java
index dd29c1c..8c07c4f 100644
--- a/solr/core/src/java/org/apache/solr/analytics/accumulator/facet/RangeFacetAccumulator.java
+++ b/solr/core/src/java/org/apache/solr/analytics/accumulator/facet/RangeFacetAccumulator.java
@@ -43,7 +43,7 @@
    * Update the readers of the rangeFacet {@link StatsCollector}s in FacetingAccumulator
    */
   @Override
-  public void setNextReader(AtomicReaderContext context) throws IOException {
+  protected void doSetNextReader(AtomicReaderContext context) throws IOException {
     parent.setRangeStatsCollectorReaders(context);
   }
 
diff --git a/solr/core/src/java/org/apache/solr/analytics/request/AnalyticsStats.java b/solr/core/src/java/org/apache/solr/analytics/request/AnalyticsStats.java
index c1ec21f..adc6807 100644
--- a/solr/core/src/java/org/apache/solr/analytics/request/AnalyticsStats.java
+++ b/solr/core/src/java/org/apache/solr/analytics/request/AnalyticsStats.java
@@ -113,7 +113,7 @@
         }
 
         if (disi != null) {
-          accumulator.setNextReader(context);
+          accumulator.getLeafCollector(context);
           int doc = disi.nextDoc();
           while( doc != DocIdSetIterator.NO_MORE_DOCS){
             // Add a document to the statistics being generated
diff --git a/solr/core/src/java/org/apache/solr/cloud/OverseerCollectionProcessor.java b/solr/core/src/java/org/apache/solr/cloud/OverseerCollectionProcessor.java
index f4abda8..905071c 100644
--- a/solr/core/src/java/org/apache/solr/cloud/OverseerCollectionProcessor.java
+++ b/solr/core/src/java/org/apache/solr/cloud/OverseerCollectionProcessor.java
@@ -557,14 +557,14 @@
       Snapshot snapshot = timer.getSnapshot();
       lst.add("totalTime", timer.getSum());
       lst.add("avgRequestsPerMinute", timer.getMeanRate());
-      lst.add("5minRateReqsPerMinute", timer.getFiveMinuteRate());
-      lst.add("15minRateReqsPerMinute", timer.getFifteenMinuteRate());
+      lst.add("5minRateRequestsPerMinute", timer.getFiveMinuteRate());
+      lst.add("15minRateRequestsPerMinute", timer.getFifteenMinuteRate());
       lst.add("avgTimePerRequest", timer.getMean());
       lst.add("medianRequestTime", snapshot.getMedian());
-      lst.add("75thPcRequestTime", snapshot.get75thPercentile());
-      lst.add("95thPcRequestTime", snapshot.get95thPercentile());
-      lst.add("99thPcRequestTime", snapshot.get99thPercentile());
-      lst.add("999thPcRequestTime", snapshot.get999thPercentile());
+      lst.add("75thPctlRequestTime", snapshot.get75thPercentile());
+      lst.add("95thPctlRequestTime", snapshot.get95thPercentile());
+      lst.add("99thPctlRequestTime", snapshot.get99thPercentile());
+      lst.add("999thPctlRequestTime", snapshot.get999thPercentile());
     }
     results.add("overseer_operations", overseerStats);
     results.add("collection_operations", collectionStats);
diff --git a/solr/core/src/java/org/apache/solr/handler/component/ExpandComponent.java b/solr/core/src/java/org/apache/solr/handler/component/ExpandComponent.java
index 99465b7..64b5690 100644
--- a/solr/core/src/java/org/apache/solr/handler/component/ExpandComponent.java
+++ b/solr/core/src/java/org/apache/solr/handler/component/ExpandComponent.java
@@ -20,9 +20,11 @@
 import org.apache.lucene.index.AtomicReader;
 import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.SortedDocValues;
+import org.apache.lucene.search.LeafCollector;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.SimpleCollector;
 import org.apache.lucene.search.Sort;
 import org.apache.lucene.search.TopDocs;
 import org.apache.lucene.search.FieldCache;
@@ -52,9 +54,12 @@
 import org.apache.solr.util.plugin.SolrCoreAware;
 import org.apache.solr.core.PluginInfo;
 import org.apache.solr.core.SolrCore;
+import com.carrotsearch.hppc.IntObjectMap;
 import com.carrotsearch.hppc.IntObjectOpenHashMap;
 import com.carrotsearch.hppc.IntOpenHashSet;
 import com.carrotsearch.hppc.cursors.IntObjectCursor;
+import com.carrotsearch.hppc.cursors.ObjectCursor;
+
 import java.io.IOException;
 import java.net.MalformedURLException;
 import java.net.URL;
@@ -210,9 +215,9 @@
     }
 
     searcher.search(query, pfilter.filter, collector);
-    IntObjectOpenHashMap groups = groupExpandCollector.getGroups();
+    IntObjectMap groups = groupExpandCollector.getGroups();
     Iterator<IntObjectCursor> it = groups.iterator();
-    Map<String, DocSlice> outMap = new HashMap();
+    Map<String, DocSlice> outMap = new HashMap<>();
     BytesRef bytesRef = new BytesRef();
     CharsRef charsRef = new CharsRef();
     FieldType fieldType = searcher.getSchema().getField(field).getType();
@@ -292,24 +297,21 @@
     rb.rsp.add("expanded", expanded);
   }
 
-  private class GroupExpandCollector extends Collector {
+  private class GroupExpandCollector implements Collector {
     private SortedDocValues docValues;
-    private IntObjectOpenHashMap groups;
+    private IntObjectMap<Collector> groups;
     private int docBase;
     private FixedBitSet groupBits;
     private IntOpenHashSet collapsedSet;
-    private List<Collector> collectors;
 
     public GroupExpandCollector(SortedDocValues docValues, FixedBitSet groupBits, IntOpenHashSet collapsedSet, int limit, Sort sort) throws IOException {
       int numGroups = collapsedSet.size();
-      groups = new IntObjectOpenHashMap(numGroups*2);
-      collectors = new ArrayList();
+      groups = new IntObjectOpenHashMap<>(numGroups*2);
       DocIdSetIterator iterator = groupBits.iterator();
       int group = -1;
       while((group = iterator.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
         Collector collector = (sort == null) ? TopScoreDocCollector.create(limit, true) : TopFieldCollector.create(sort,limit, false, false,false, true);
         groups.put(group, collector);
-        collectors.add(collector);
       }
 
       this.collapsedSet = collapsedSet;
@@ -317,35 +319,42 @@
       this.docValues = docValues;
     }
 
-    public IntObjectOpenHashMap getGroups() {
-      return this.groups;
-    }
-
-    public boolean acceptsDocsOutOfOrder() {
-      return false;
-    }
-
-    public void collect(int docId) throws IOException {
-      int doc = docId+docBase;
-      int ord = docValues.getOrd(doc);
-      if(ord > -1 && groupBits.get(ord) && !collapsedSet.contains(doc)) {
-        Collector c = (Collector)groups.get(ord);
-        c.collect(docId);
+    public LeafCollector getLeafCollector(AtomicReaderContext context) throws IOException {
+      final int docBase = context.docBase;
+      final IntObjectMap<LeafCollector> leafCollectors = new IntObjectOpenHashMap<>();
+      for (IntObjectCursor<Collector> entry : groups) {
+        leafCollectors.put(entry.key, entry.value.getLeafCollector(context));
       }
+      return new LeafCollector() {
+        
+        @Override
+        public void setScorer(Scorer scorer) throws IOException {
+          for (ObjectCursor<LeafCollector> c : leafCollectors.values()) {
+            c.value.setScorer(scorer);
+          }
+        }
+        
+        @Override
+        public void collect(int docId) throws IOException {
+          int doc = docId+docBase;
+          int ord = docValues.getOrd(doc);
+          if(ord > -1 && groupBits.get(ord) && !collapsedSet.contains(doc)) {
+            LeafCollector c = leafCollectors.get(ord);
+            c.collect(docId);
+          }
+        }
+        
+        @Override
+        public boolean acceptsDocsOutOfOrder() {
+          return false;
+        }
+      };
     }
 
-    public void setNextReader(AtomicReaderContext context) throws IOException {
-      this.docBase = context.docBase;
-      for(Collector c : collectors) {
-        c.setNextReader(context);
-      }
+    public IntObjectMap<Collector> getGroups() {
+      return groups;
     }
 
-    public void setScorer(Scorer scorer) throws IOException {
-      for(Collector c : collectors) {
-        c.setScorer(scorer);
-      }
-    }
   }
 
   ////////////////////////////////////////////
@@ -372,4 +381,4 @@
       throw new RuntimeException(e);
     }
   }
-}
\ No newline at end of file
+}
diff --git a/solr/core/src/java/org/apache/solr/schema/LatLonType.java b/solr/core/src/java/org/apache/solr/schema/LatLonType.java
index 27157d2..2763c84 100644
--- a/solr/core/src/java/org/apache/solr/schema/LatLonType.java
+++ b/solr/core/src/java/org/apache/solr/schema/LatLonType.java
@@ -23,6 +23,7 @@
 import java.util.Set;
 
 import com.spatial4j.core.shape.Point;
+
 import org.apache.lucene.document.FieldType;
 import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.IndexReader;
@@ -30,6 +31,7 @@
 import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.queries.function.valuesource.VectorValueSource;
+import org.apache.lucene.search.LeafCollector;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.ComplexExplanation;
@@ -51,6 +53,7 @@
 import com.spatial4j.core.context.SpatialContext;
 import com.spatial4j.core.distance.DistanceUtils;
 import com.spatial4j.core.shape.Rectangle;
+
 import org.apache.solr.util.SpatialUtils;
 
 
@@ -522,14 +525,14 @@
     @Override
     public void collect(int doc) throws IOException {
       spatialScorer.doc = doc;
-      if (spatialScorer.match()) delegate.collect(doc);
+      if (spatialScorer.match()) leafDelegate.collect(doc);
     }
 
     @Override
-    public void setNextReader(AtomicReaderContext context) throws IOException {
+    protected void doSetNextReader(AtomicReaderContext context) throws IOException {
+      super.doSetNextReader(context);
       maxdoc = context.reader().maxDoc();
       spatialScorer = new SpatialScorer(context, null, weight, 1.0f);
-      super.setNextReader(context);
     }
   }
 
diff --git a/solr/core/src/java/org/apache/solr/search/CollapsingQParserPlugin.java b/solr/core/src/java/org/apache/solr/search/CollapsingQParserPlugin.java
index 601790c..93ce79c 100644
--- a/solr/core/src/java/org/apache/solr/search/CollapsingQParserPlugin.java
+++ b/solr/core/src/java/org/apache/solr/search/CollapsingQParserPlugin.java
@@ -34,11 +34,15 @@
 import org.apache.lucene.queries.function.FunctionQuery;
 import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.ValueSource;
+import org.apache.lucene.search.LeafCollector;
+import org.apache.lucene.search.Collector;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.FieldCache;
+import org.apache.lucene.search.FilterCollector;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.TopFieldCollector;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.FixedBitSet;
@@ -340,7 +344,7 @@
 
         IntOpenHashSet boostDocs = getBoostDocs(searcher, this.boosted);
 
-        if(this.min != null || this.max != null) {
+        if (this.min != null || this.max != null) {
 
           return new CollapsingFieldValueCollector(maxDoc,
                                                    leafCount,
@@ -436,7 +440,6 @@
     private SortedDocValues values;
     private int[] ords;
     private float[] scores;
-    private int docBase;
     private int maxDoc;
     private int nullPolicy;
     private float nullScore = -Float.MAX_VALUE;
@@ -489,7 +492,7 @@
     }
 
     @Override
-    public void setNextReader(AtomicReaderContext context) throws IOException {
+    protected void doSetNextReader(AtomicReaderContext context) throws IOException {
       this.contexts[context.ord] = context;
       this.docBase = context.docBase;
     }
@@ -546,9 +549,9 @@
       int currentContext = 0;
       int currentDocBase = 0;
       int nextDocBase = currentContext+1 < contexts.length ? contexts[currentContext+1].docBase : maxDoc;
-      delegate.setNextReader(contexts[currentContext]);
+      leafDelegate = delegate.getLeafCollector(contexts[currentContext]);
       DummyScorer dummy = new DummyScorer();
-      delegate.setScorer(dummy);
+      leafDelegate.setScorer(dummy);
       DocIdSetIterator it = collapsedSet.iterator();
       int docId = -1;
       int nullScoreIndex = 0;
@@ -571,13 +574,13 @@
           currentContext++;
           currentDocBase = contexts[currentContext].docBase;
           nextDocBase = currentContext+1 < contexts.length ? contexts[currentContext+1].docBase : maxDoc;
-          delegate.setNextReader(contexts[currentContext]);
-          delegate.setScorer(dummy);
+          leafDelegate = delegate.getLeafCollector(contexts[currentContext]);
+          leafDelegate.setScorer(dummy);
         }
 
         int contextDoc = docId-currentDocBase;
         dummy.docId = contextDoc;
-        delegate.collect(contextDoc);
+        leafDelegate.collect(contextDoc);
       }
 
       if(delegate instanceof DelegatingCollector) {
@@ -590,7 +593,6 @@
     private AtomicReaderContext[] contexts;
     private SortedDocValues values;
 
-    private int docBase;
     private int maxDoc;
     private int nullPolicy;
 
@@ -640,7 +642,7 @@
       this.fieldValueCollapse.setScorer(scorer);
     }
 
-    public void setNextReader(AtomicReaderContext context) throws IOException {
+    public void doSetNextReader(AtomicReaderContext context) throws IOException {
       this.contexts[context.ord] = context;
       this.docBase = context.docBase;
       this.fieldValueCollapse.setNextReader(context);
@@ -660,9 +662,9 @@
       int currentContext = 0;
       int currentDocBase = 0;
       int nextDocBase = currentContext+1 < contexts.length ? contexts[currentContext+1].docBase : maxDoc;
-      delegate.setNextReader(contexts[currentContext]);
+      leafDelegate = delegate.getLeafCollector(contexts[currentContext]);
       DummyScorer dummy = new DummyScorer();
-      delegate.setScorer(dummy);
+      leafDelegate.setScorer(dummy);
       DocIdSetIterator it = fieldValueCollapse.getCollapsedSet().iterator();
       int docId = -1;
       int nullScoreIndex = 0;
@@ -689,13 +691,13 @@
           currentContext++;
           currentDocBase = contexts[currentContext].docBase;
           nextDocBase = currentContext+1 < contexts.length ? contexts[currentContext+1].docBase : maxDoc;
-          delegate.setNextReader(contexts[currentContext]);
-          delegate.setScorer(dummy);
+          leafDelegate = delegate.getLeafCollector(contexts[currentContext]);
+          leafDelegate.setScorer(dummy);
         }
 
         int contextDoc = docId-currentDocBase;
         dummy.docId = contextDoc;
-        delegate.collect(contextDoc);
+        leafDelegate.collect(contextDoc);
       }
 
       if(delegate instanceof DelegatingCollector) {
diff --git a/solr/core/src/java/org/apache/solr/search/DelegatingCollector.java b/solr/core/src/java/org/apache/solr/search/DelegatingCollector.java
index 97045e8..06b9658 100644
--- a/solr/core/src/java/org/apache/solr/search/DelegatingCollector.java
+++ b/solr/core/src/java/org/apache/solr/search/DelegatingCollector.java
@@ -18,21 +18,23 @@
 package org.apache.solr.search;
 
 
-import org.apache.lucene.index.IndexReader;
+import java.io.IOException;
+
 import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.LeafCollector;
 import org.apache.lucene.search.Collector;
 import org.apache.lucene.search.Scorer;
-
-import java.io.IOException;
+import org.apache.lucene.search.SimpleCollector;
 
 
 /** A simple delegating collector where one can set the delegate after creation */
-public class DelegatingCollector extends Collector {
+public class DelegatingCollector extends SimpleCollector {
 
   /* for internal testing purposes only to determine the number of times a delegating collector chain was used */
   public static int setLastDelegateCount;
 
   protected Collector delegate;
+  protected LeafCollector leafDelegate;
   protected Scorer scorer;
   protected AtomicReaderContext context;
   protected int docBase;
@@ -56,24 +58,26 @@
   @Override
   public void setScorer(Scorer scorer) throws IOException {
     this.scorer = scorer;
-    delegate.setScorer(scorer);
+    if (leafDelegate != null) {
+      leafDelegate.setScorer(scorer);
+    }
   }
 
   @Override
   public void collect(int doc) throws IOException {
-    delegate.collect(doc);
+    leafDelegate.collect(doc);
   }
 
   @Override
-  public void setNextReader(AtomicReaderContext context) throws IOException {
+  protected void doSetNextReader(AtomicReaderContext context) throws IOException {
     this.context = context;
     this.docBase = context.docBase;
-    delegate.setNextReader(context);
+    leafDelegate = delegate.getLeafCollector(context);
   }
 
   @Override
   public boolean acceptsDocsOutOfOrder() {
-    return delegate.acceptsDocsOutOfOrder();
+    return leafDelegate.acceptsDocsOutOfOrder();
   }
 
   public void finish() throws IOException {
diff --git a/solr/core/src/java/org/apache/solr/search/DocSetCollector.java b/solr/core/src/java/org/apache/solr/search/DocSetCollector.java
index 76c3660..cbc179b 100644
--- a/solr/core/src/java/org/apache/solr/search/DocSetCollector.java
+++ b/solr/core/src/java/org/apache/solr/search/DocSetCollector.java
@@ -20,15 +20,16 @@
 import java.io.IOException;
 
 import org.apache.lucene.index.AtomicReaderContext;
-import org.apache.lucene.search.Collector;
+import org.apache.lucene.search.LeafCollector;
 import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.SimpleCollector;
 import org.apache.lucene.util.FixedBitSet;
 
 /**
  *
  */
 
-public class DocSetCollector extends Collector {
+public class DocSetCollector extends SimpleCollector {
   int pos=0;
   FixedBitSet bits;
   final int maxDoc;
@@ -84,7 +85,7 @@
   }
 
   @Override
-  public void setNextReader(AtomicReaderContext context) throws IOException {
+  protected void doSetNextReader(AtomicReaderContext context) throws IOException {
     this.base = context.docBase;
   }
 
diff --git a/solr/core/src/java/org/apache/solr/search/DocSetDelegateCollector.java b/solr/core/src/java/org/apache/solr/search/DocSetDelegateCollector.java
deleted file mode 100644
index a73d77c..0000000
--- a/solr/core/src/java/org/apache/solr/search/DocSetDelegateCollector.java
+++ /dev/null
@@ -1,84 +0,0 @@
-package org.apache.solr.search;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.io.IOException;
-
-import org.apache.lucene.index.AtomicReaderContext;
-import org.apache.lucene.search.Collector;
-import org.apache.lucene.search.Scorer;
-import org.apache.lucene.util.FixedBitSet;
-
-/**
- *
- */
-public class DocSetDelegateCollector extends DocSetCollector {
-  final Collector collector;
-
-  public DocSetDelegateCollector(int smallSetSize, int maxDoc, Collector collector) {
-    super(smallSetSize, maxDoc);
-    this.collector = collector;
-  }
-
-  @Override
-  public void collect(int doc) throws IOException {
-    collector.collect(doc);
-
-    doc += base;
-    // optimistically collect the first docs in an array
-    // in case the total number will be small enough to represent
-    // as a small set like SortedIntDocSet instead...
-    // Storing in this array will be quicker to convert
-    // than scanning through a potentially huge bit vector.
-    // FUTURE: when search methods all start returning docs in order, maybe
-    // we could have a ListDocSet() and use the collected array directly.
-    if (pos < scratch.length) {
-      scratch[pos]=doc;
-    } else {
-      // this conditional could be removed if BitSet was preallocated, but that
-      // would take up more memory, and add more GC time...
-      if (bits==null) bits = new FixedBitSet(maxDoc);
-      bits.set(doc);
-    }
-
-    pos++;
-  }
-
-  @Override
-  public DocSet getDocSet() {
-    if (pos<=scratch.length) {
-      // assumes docs were collected in sorted order!
-      return new SortedIntDocSet(scratch, pos);
-    } else {
-      // set the bits for ids that were collected in the array
-      for (int i=0; i<scratch.length; i++) bits.set(scratch[i]);
-      return new BitDocSet(bits,pos);
-    }
-  }
-
-  @Override
-  public void setScorer(Scorer scorer) throws IOException {
-    collector.setScorer(scorer);
-  }
-
-  @Override
-  public void setNextReader(AtomicReaderContext context) throws IOException {
-    collector.setNextReader(context);
-    this.base = context.docBase;
-  }
-}
diff --git a/solr/core/src/java/org/apache/solr/search/EarlyTerminatingCollector.java b/solr/core/src/java/org/apache/solr/search/EarlyTerminatingCollector.java
index b9eaca6..200d326 100644
--- a/solr/core/src/java/org/apache/solr/search/EarlyTerminatingCollector.java
+++ b/solr/core/src/java/org/apache/solr/search/EarlyTerminatingCollector.java
@@ -20,67 +20,70 @@
 import java.io.IOException;
 
 import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.LeafCollector;
 import org.apache.lucene.search.Collector;
-import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.FilterLeafCollector;
+import org.apache.lucene.search.FilterCollector;
 /**
  * <p>
- *  A wrapper {@link Collector} that throws {@link EarlyTerminatingCollectorException}) 
+ *  A wrapper {@link Collector} that throws {@link EarlyTerminatingCollectorException}
  *  once a specified maximum number of documents are collected.
  * </p>
  */
-public class EarlyTerminatingCollector extends Collector {
+public class EarlyTerminatingCollector extends FilterCollector {
 
   private final int maxDocsToCollect;
-  private final Collector delegate;
 
   private int numCollected = 0;
   private int prevReaderCumulativeSize = 0;
-  private int currentReaderSize = 0;  
+  private int currentReaderSize = 0;
 
   /**
    * <p>
-   *  Wraps a {@link Collector}, throwing {@link EarlyTerminatingCollectorException} 
+   *  Wraps a {@link Collector}, throwing {@link EarlyTerminatingCollectorException}
    *  once the specified maximum is reached.
    * </p>
    * @param delegate - the Collector to wrap.
    * @param maxDocsToCollect - the maximum number of documents to Collect
-   * 
+   *
    */
   public EarlyTerminatingCollector(Collector delegate, int maxDocsToCollect) {
+    super(delegate);
     assert 0 < maxDocsToCollect;
     assert null != delegate;
 
-    this.delegate = delegate;
     this.maxDocsToCollect = maxDocsToCollect;
   }
 
-  /**
-   * This collector requires that docs be collected in order, otherwise
-   * the computed number of scanned docs in the resulting 
-   * {@link EarlyTerminatingCollectorException} will be meaningless.
-   */
   @Override
-  public boolean acceptsDocsOutOfOrder() {
-    return false;
-  }
-
-  @Override
-  public void collect(int doc) throws IOException {
-    delegate.collect(doc);
-    numCollected++;  
-    if(maxDocsToCollect <= numCollected) {
-      throw new EarlyTerminatingCollectorException
-        (numCollected, prevReaderCumulativeSize + (doc + 1));
-    }
-  }
-  @Override
-  public void setNextReader(AtomicReaderContext context) throws IOException {
+  public LeafCollector getLeafCollector(AtomicReaderContext context)
+      throws IOException {
     prevReaderCumulativeSize += currentReaderSize; // not current any more
     currentReaderSize = context.reader().maxDoc() - 1;
-    delegate.setNextReader(context);
+
+    return new FilterLeafCollector(super.getLeafCollector(context)) {
+
+      /**
+       * This collector requires that docs be collected in order, otherwise
+       * the computed number of scanned docs in the resulting
+       * {@link EarlyTerminatingCollectorException} will be meaningless.
+       */
+      @Override
+      public boolean acceptsDocsOutOfOrder() {
+        return false;
+      }
+
+      @Override
+      public void collect(int doc) throws IOException {
+        super.collect(doc);
+        numCollected++;
+        if (maxDocsToCollect <= numCollected) {
+          throw new EarlyTerminatingCollectorException
+            (numCollected, prevReaderCumulativeSize + (doc + 1));
+        }
+      }
+
+    };
   }
-  @Override
-  public void setScorer(Scorer scorer) throws IOException {
-    delegate.setScorer(scorer);    
-  }
+
 }
diff --git a/solr/core/src/java/org/apache/solr/search/FunctionRangeQuery.java b/solr/core/src/java/org/apache/solr/search/FunctionRangeQuery.java
index 4e913dd..91bc1c0 100644
--- a/solr/core/src/java/org/apache/solr/search/FunctionRangeQuery.java
+++ b/solr/core/src/java/org/apache/solr/search/FunctionRangeQuery.java
@@ -22,6 +22,9 @@
 import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.queries.function.ValueSourceScorer;
+import org.apache.lucene.search.LeafCollector;
+import org.apache.lucene.search.Collector;
+import org.apache.lucene.search.FilterCollector;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.solr.search.function.ValueSourceRangeFilter;
 
@@ -55,16 +58,16 @@
     @Override
     public void collect(int doc) throws IOException {
       if (doc<maxdoc && scorer.matches(doc)) {
-        delegate.collect(doc);
+        leafDelegate.collect(doc);
       }
     }
 
     @Override
-    public void setNextReader(AtomicReaderContext context) throws IOException {
+    protected void doSetNextReader(AtomicReaderContext context) throws IOException {
+      super.doSetNextReader(context);
       maxdoc = context.reader().maxDoc();
       FunctionValues dv = rangeFilt.getValueSource().getValues(fcontext, context);
       scorer = dv.getRangeScorer(context.reader(), rangeFilt.getLowerVal(), rangeFilt.getUpperVal(), rangeFilt.isIncludeLower(), rangeFilt.isIncludeUpper());
-      super.setNextReader(context);
     }
   }
 }
diff --git a/solr/core/src/java/org/apache/solr/search/Grouping.java b/solr/core/src/java/org/apache/solr/search/Grouping.java
index 88066de..ee12482 100644
--- a/solr/core/src/java/org/apache/solr/search/Grouping.java
+++ b/solr/core/src/java/org/apache/solr/search/Grouping.java
@@ -342,12 +342,12 @@
       }
     }
 
-    Collector allCollectors = MultiCollector.wrap(collectors.toArray(new Collector[collectors.size()]));
     DocSetCollector setCollector = null;
     if (getDocSet && allGroupHeadsCollector == null) {
-      setCollector = new DocSetDelegateCollector(maxDoc >> 6, maxDoc, allCollectors);
-      allCollectors = setCollector;
+      setCollector = new DocSetCollector(maxDoc >> 6, maxDoc);
+      collectors.add(setCollector);
     }
+    Collector allCollectors = MultiCollector.wrap(collectors);
 
     CachingCollector cachedCollector = null;
     if (cacheSecondPassSearch && allCollectors != null) {
diff --git a/solr/core/src/java/org/apache/solr/search/SolrIndexSearcher.java b/solr/core/src/java/org/apache/solr/search/SolrIndexSearcher.java
index be97d81..176c0df 100644
--- a/solr/core/src/java/org/apache/solr/search/SolrIndexSearcher.java
+++ b/solr/core/src/java/org/apache/solr/search/SolrIndexSearcher.java
@@ -60,6 +60,7 @@
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.search.LeafCollector;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.Collector;
@@ -71,9 +72,11 @@
 import org.apache.lucene.search.Filter;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.search.MultiCollector;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.SimpleCollector;
 import org.apache.lucene.search.Sort;
 import org.apache.lucene.search.SortField;
 import org.apache.lucene.search.TermQuery;
@@ -930,17 +933,17 @@
         if (idIter == null) continue;
       }
 
-      collector.setNextReader(leaf);
+      final LeafCollector leafCollector = collector.getLeafCollector(leaf);
       int max = reader.maxDoc();
 
       if (idIter == null) {
         for (int docid = 0; docid<max; docid++) {
           if (liveDocs != null && !liveDocs.get(docid)) continue;
-          collector.collect(docid);
+          leafCollector.collect(docid);
         }
       } else {
         for (int docid = -1; (docid = idIter.advance(docid+1)) < max; ) {
-          collector.collect(docid);
+          leafCollector.collect(docid);
         }
       }
     }
@@ -1526,24 +1529,18 @@
       Collector collector;
 
       if (!needScores) {
-        collector = new Collector () {
-          @Override
-          public void setScorer(Scorer scorer) {
-          }
+        collector = new SimpleCollector () {
           @Override
           public void collect(int doc) {
             numHits[0]++;
           }
           @Override
-          public void setNextReader(AtomicReaderContext context) {
-          }
-          @Override
           public boolean acceptsDocsOutOfOrder() {
             return true;
           }
         };
       } else {
-        collector = new Collector() {
+        collector = new SimpleCollector() {
           Scorer scorer;
           @Override
           public void setScorer(Scorer scorer) {
@@ -1556,9 +1553,6 @@
             if (score > topscore[0]) topscore[0]=score;            
           }
           @Override
-          public void setNextReader(AtomicReaderContext context) {
-          }
-          @Override
           public boolean acceptsDocsOutOfOrder() {
             return true;
           }
@@ -1667,30 +1661,33 @@
       final float[] topscore = new float[] { Float.NEGATIVE_INFINITY };
 
       Collector collector;
-      DocSetCollector setCollector;
+      final DocSetCollector setCollector = new DocSetCollector(smallSetSize, maxDoc);
 
        if (!needScores) {
-         collector = setCollector = new DocSetCollector(smallSetSize, maxDoc);
+         collector = setCollector;
        } else {
-         collector = setCollector = new DocSetDelegateCollector(smallSetSize, maxDoc, new Collector() {
+         final Collector topScoreCollector = new SimpleCollector() {
+          
            Scorer scorer;
+           
            @Override
-          public void setScorer(Scorer scorer) {
-             this.scorer = scorer;
-           }
-           @Override
+          public void setScorer(Scorer scorer) throws IOException {
+            this.scorer = scorer;
+          }
+           
+          @Override
           public void collect(int doc) throws IOException {
-             float score = scorer.score();
-             if (score > topscore[0]) topscore[0]=score;
-           }
-           @Override
-          public void setNextReader(AtomicReaderContext context) {
-           }
-           @Override
+            float score = scorer.score();
+            if (score > topscore[0]) topscore[0] = score;
+          }
+          
+          @Override
           public boolean acceptsDocsOutOfOrder() {
-             return false;
-           }
-         });
+            return true;
+          }
+        };
+        
+        collector = MultiCollector.wrap(setCollector, topScoreCollector);
        }
        if (terminateEarly) {
          collector = new EarlyTerminatingCollector(collector, cmd.len);
@@ -1726,8 +1723,8 @@
     } else {
 
       final TopDocsCollector topCollector = buildTopDocsCollector(len, cmd);
-      DocSetCollector setCollector = new DocSetDelegateCollector(maxDoc>>6, maxDoc, topCollector);
-      Collector collector = setCollector;
+      DocSetCollector setCollector = new DocSetCollector(maxDoc>>6, maxDoc);
+      Collector collector = MultiCollector.wrap(topCollector, setCollector);
       if (terminateEarly) {
         collector = new EarlyTerminatingCollector(collector, cmd.len);
       }
@@ -2031,7 +2028,7 @@
         AtomicReaderContext leaf = leafContexts.get(readerIndex++);
         base = leaf.docBase;
         end = base + leaf.reader().maxDoc();
-        topCollector.setNextReader(leaf);
+        topCollector.getLeafCollector(leaf);
         // we should never need to set the scorer given the settings for the collector
       }
       topCollector.collect(doc-base);
diff --git a/solr/core/src/java/org/apache/solr/search/grouping/CommandHandler.java b/solr/core/src/java/org/apache/solr/search/grouping/CommandHandler.java
index 4d10c93..e842513 100644
--- a/solr/core/src/java/org/apache/solr/search/grouping/CommandHandler.java
+++ b/solr/core/src/java/org/apache/solr/search/grouping/CommandHandler.java
@@ -29,12 +29,10 @@
 import org.apache.lucene.search.TotalHitCountCollector;
 import org.apache.lucene.search.grouping.AbstractAllGroupHeadsCollector;
 import org.apache.lucene.search.grouping.term.TermAllGroupHeadsCollector;
-import org.apache.lucene.util.FixedBitSet;
 import org.apache.solr.common.util.NamedList;
 import org.apache.solr.search.BitDocSet;
 import org.apache.solr.search.DocSet;
 import org.apache.solr.search.DocSetCollector;
-import org.apache.solr.search.DocSetDelegateCollector;
 import org.apache.solr.search.QueryUtils;
 import org.apache.solr.search.SolrIndexSearcher;
 import org.apache.solr.search.SolrIndexSearcher.ProcessedFilter;
@@ -173,14 +171,11 @@
 
   private DocSet computeDocSet(Query query, ProcessedFilter filter, List<Collector> collectors) throws IOException {
     int maxDoc = searcher.maxDoc();
-    DocSetCollector docSetCollector;
-    if (collectors.isEmpty()) {
-      docSetCollector = new DocSetCollector(maxDoc >> 6, maxDoc);
-    } else {
-      Collector wrappedCollectors = MultiCollector.wrap(collectors.toArray(new Collector[collectors.size()]));
-      docSetCollector = new DocSetDelegateCollector(maxDoc >> 6, maxDoc, wrappedCollectors);
-    }
-    searchWithTimeLimiter(query, filter, docSetCollector);
+
+    final DocSetCollector docSetCollector = new DocSetCollector(maxDoc >> 6, maxDoc);
+    List<Collector> allCollectors = new ArrayList<>(collectors);
+    allCollectors.add(docSetCollector);
+    searchWithTimeLimiter(query, filter, MultiCollector.wrap(allCollectors));
     return docSetCollector.getDocSet();
   }
 
diff --git a/solr/core/src/java/org/apache/solr/search/grouping/collector/FilterCollector.java b/solr/core/src/java/org/apache/solr/search/grouping/collector/FilterCollector.java
index 5ab7f18..3dd8545 100644
--- a/solr/core/src/java/org/apache/solr/search/grouping/collector/FilterCollector.java
+++ b/solr/core/src/java/org/apache/solr/search/grouping/collector/FilterCollector.java
@@ -17,52 +17,42 @@
  * limitations under the License.
  */
 
-import org.apache.lucene.index.AtomicReaderContext;
-import org.apache.lucene.search.Collector;
-import org.apache.lucene.search.Scorer;
-import org.apache.solr.search.DocSet;
-
 import java.io.IOException;
 
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.LeafCollector;
+import org.apache.lucene.search.Collector;
+import org.apache.lucene.search.FilterLeafCollector;
+import org.apache.solr.search.DocSet;
+
 /**
  * A collector that filters incoming doc ids that are not in the filter.
  *
  * @lucene.experimental
  */
-public class FilterCollector extends Collector {
+public class FilterCollector extends org.apache.lucene.search.FilterCollector {
 
   private final DocSet filter;
-  private final Collector delegate;
-  private int docBase;
   private int matches;
 
   public FilterCollector(DocSet filter, Collector delegate) {
+    super(delegate);
     this.filter = filter;
-    this.delegate = delegate;
   }
 
   @Override
-  public void setScorer(Scorer scorer) throws IOException {
-    delegate.setScorer(scorer);
-  }
-
-  @Override
-  public void collect(int doc) throws IOException {
-    matches++;
-    if (filter.exists(doc + docBase)) {
-      delegate.collect(doc);
-    }
-  }
-
-  @Override
-  public void setNextReader(AtomicReaderContext context) throws IOException {
-    this.docBase = context.docBase;
-    delegate.setNextReader(context);
-  }
-
-  @Override
-  public boolean acceptsDocsOutOfOrder() {
-    return delegate.acceptsDocsOutOfOrder();
+  public LeafCollector getLeafCollector(AtomicReaderContext context)
+      throws IOException {
+    final int docBase = context.docBase;
+    return new FilterLeafCollector(super.getLeafCollector(context)) {
+      @Override
+      public void collect(int doc) throws IOException {
+        matches++;
+        if (filter.exists(doc + docBase)) {
+          super.collect(doc);
+        }
+      }
+    };
   }
 
   public int getMatches() {
@@ -75,6 +65,6 @@
    * @return the delegate collector
    */
   public Collector getDelegate() {
-    return delegate;
+    return in;
   }
 }
diff --git a/solr/core/src/test/org/apache/solr/search/TestSort.java b/solr/core/src/test/org/apache/solr/search/TestSort.java
index b319521..9671374 100644
--- a/solr/core/src/test/org/apache/solr/search/TestSort.java
+++ b/solr/core/src/test/org/apache/solr/search/TestSort.java
@@ -32,10 +32,13 @@
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.search.LeafCollector;
 import org.apache.lucene.search.BitsFilteredDocIdSet;
 import org.apache.lucene.search.Collector;
 import org.apache.lucene.search.DocIdSet;
 import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.FilterLeafCollector;
+import org.apache.lucene.search.FilterCollector;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.MatchAllDocsQuery;
 import org.apache.lucene.search.ScoreDoc;
@@ -265,30 +268,21 @@
 
         final List<MyDoc> collectedDocs = new ArrayList<>();
         // delegate and collect docs ourselves
-        Collector myCollector = new Collector() {
-          int docBase;
+        Collector myCollector = new FilterCollector(topCollector) {
 
           @Override
-          public void setScorer(Scorer scorer) throws IOException {
-            topCollector.setScorer(scorer);
+          public LeafCollector getLeafCollector(AtomicReaderContext context)
+              throws IOException {
+            final int docBase = context.docBase;
+            return new FilterLeafCollector(super.getLeafCollector(context)) {
+              @Override
+              public void collect(int doc) throws IOException {
+                super.collect(doc);
+                collectedDocs.add(mydocs[docBase + doc]);
+              }
+            };
           }
 
-          @Override
-          public void collect(int doc) throws IOException {
-            topCollector.collect(doc);
-            collectedDocs.add(mydocs[doc + docBase]);
-          }
-
-          @Override
-          public void setNextReader(AtomicReaderContext context) throws IOException {
-            topCollector.setNextReader(context);
-            docBase = context.docBase;
-          }
-
-          @Override
-          public boolean acceptsDocsOutOfOrder() {
-            return topCollector.acceptsDocsOutOfOrder();
-          }
         };
 
         searcher.search(new MatchAllDocsQuery(), filt, myCollector);