LUCENE-2878: Merge from trunk
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/branches/LUCENE-2878@1535436 13f79535-47bb-0310-9956-ffa450edef68
diff --git a/dev-tools/idea/lucene/highlighter/highlighter.iml b/dev-tools/idea/lucene/highlighter/highlighter.iml
index 0a8e679..8b6d644 100644
--- a/dev-tools/idea/lucene/highlighter/highlighter.iml
+++ b/dev-tools/idea/lucene/highlighter/highlighter.iml
@@ -12,6 +12,7 @@
     <orderEntry type="sourceFolder" forTests="false" />
     <orderEntry type="library" scope="TEST" name="JUnit" level="project" />
     <orderEntry type="module" scope="TEST" module-name="lucene-test-framework" />
+    <orderEntry type="module" scope="TEST" module-name="codecs" />
     <orderEntry type="module" module-name="memory" />
     <orderEntry type="module" module-name="misc" />
     <orderEntry type="module" module-name="queries" />
diff --git a/dev-tools/maven/lucene/highlighter/pom.xml.template b/dev-tools/maven/lucene/highlighter/pom.xml.template
index 7964b7d..5b6f8a1 100644
--- a/dev-tools/maven/lucene/highlighter/pom.xml.template
+++ b/dev-tools/maven/lucene/highlighter/pom.xml.template
@@ -58,6 +58,12 @@
     </dependency>
     <dependency>
       <groupId>${project.groupId}</groupId>
+      <artifactId>lucene-codecs</artifactId>
+      <version>${project.version}</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>${project.groupId}</groupId>
       <artifactId>lucene-memory</artifactId>
       <version>${project.version}</version>
     </dependency>
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestClassicAnalyzer.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestClassicAnalyzer.java
index e2a1ea0..baa8113 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestClassicAnalyzer.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestClassicAnalyzer.java
@@ -7,7 +7,7 @@
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.index.DocsEnum;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
@@ -282,7 +282,7 @@
 
     // Make sure position is still incremented when
     // massive term is skipped:
-    DocsAndPositionsEnum tps = MultiFields.getTermPositionsEnum(reader,
+    DocsEnum tps = MultiFields.getTermPositionsEnum(reader,
                                                                 MultiFields.getLiveDocs(reader),
                                                                 "content",
                                                                 new BytesRef("another"));
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/sinks/TestTeeSinkTokenFilter.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/sinks/TestTeeSinkTokenFilter.java
index b49e122..41c7cfe 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/sinks/TestTeeSinkTokenFilter.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/sinks/TestTeeSinkTokenFilter.java
@@ -31,7 +31,7 @@
 import org.apache.lucene.document.FieldType;
 import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.index.DocsEnum;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.Terms;
@@ -111,7 +111,7 @@
     TermsEnum termsEnum = vector.iterator(null);
     termsEnum.next();
     assertEquals(2, termsEnum.totalTermFreq());
-    DocsAndPositionsEnum positions = termsEnum.docsAndPositions(null, null);
+    DocsEnum positions = termsEnum.docsAndPositions(null, null);
     assertTrue(positions.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
     assertEquals(2, positions.freq());
     positions.nextPosition();
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/BlockTermsReader.java b/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/BlockTermsReader.java
index bb81e8b..1d56038 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/BlockTermsReader.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/BlockTermsReader.java
@@ -27,7 +27,7 @@
 import org.apache.lucene.codecs.FieldsProducer;
 import org.apache.lucene.codecs.PostingsReaderBase;
 import org.apache.lucene.index.CorruptIndexException;
-import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.index.DocsEnum;
 import org.apache.lucene.index.DocsEnum;
 import org.apache.lucene.index.FieldInfo.IndexOptions;
 import org.apache.lucene.index.FieldInfo;
@@ -675,7 +675,7 @@
       }
 
       @Override
-      public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) throws IOException {
+      public DocsEnum docsAndPositions(Bits liveDocs, DocsEnum reuse, int flags) throws IOException {
         if (fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) < 0) {
           // Positions were not indexed:
           return null;
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/bloom/BloomFilteringPostingsFormat.java b/lucene/codecs/src/java/org/apache/lucene/codecs/bloom/BloomFilteringPostingsFormat.java
index 6960387..1214320 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/bloom/BloomFilteringPostingsFormat.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/bloom/BloomFilteringPostingsFormat.java
@@ -17,20 +17,11 @@
  * limitations under the License.
  */
 
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map.Entry;
-import java.util.Map;
-
 import org.apache.lucene.codecs.CodecUtil;
 import org.apache.lucene.codecs.FieldsConsumer;
 import org.apache.lucene.codecs.FieldsProducer;
 import org.apache.lucene.codecs.PostingsFormat;
 import org.apache.lucene.codecs.bloom.FuzzySet.ContainsResult;
-import org.apache.lucene.index.DocsAndPositionsEnum;
 import org.apache.lucene.index.DocsEnum;
 import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.Fields;
@@ -48,6 +39,14 @@
 import org.apache.lucene.util.RamUsageEstimator;
 import org.apache.lucene.util.automaton.CompiledAutomaton;
 
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+
 /**
  * <p>
  * A {@link PostingsFormat} useful for low doc-frequency fields such as primary
@@ -366,11 +365,10 @@
       public long totalTermFreq() throws IOException {
         return delegate().totalTermFreq();
       }
-      
 
       @Override
-      public DocsAndPositionsEnum docsAndPositions(Bits liveDocs,
-          DocsAndPositionsEnum reuse, int flags) throws IOException {
+      public DocsEnum docsAndPositions(Bits liveDocs,
+          DocsEnum reuse, int flags) throws IOException {
         return delegate().docsAndPositions(liveDocs, reuse, flags);
       }
 
@@ -379,6 +377,7 @@
           throws IOException {
         return delegate().docs(liveDocs, reuse, flags);
       }
+
     }
 
     @Override
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/DirectPostingsFormat.java b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/DirectPostingsFormat.java
index 0e02e7e..cf58e96 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/DirectPostingsFormat.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/DirectPostingsFormat.java
@@ -17,20 +17,13 @@
  * limitations under the License.
  */
 
-import java.io.IOException;
-import java.util.Collections;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.TreeMap;
-
 import org.apache.lucene.codecs.FieldsConsumer;
 import org.apache.lucene.codecs.FieldsProducer;
 import org.apache.lucene.codecs.PostingsFormat;
-import org.apache.lucene.codecs.lucene41.Lucene41PostingsFormat; // javadocs
-import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.codecs.lucene41.Lucene41PostingsFormat;
 import org.apache.lucene.index.DocsEnum;
-import org.apache.lucene.index.FieldInfo.IndexOptions;
 import org.apache.lucene.index.FieldInfo;
+import org.apache.lucene.index.FieldInfo.IndexOptions;
 import org.apache.lucene.index.Fields;
 import org.apache.lucene.index.OrdTermState;
 import org.apache.lucene.index.SegmentReadState;
@@ -48,6 +41,12 @@
 import org.apache.lucene.util.automaton.RunAutomaton;
 import org.apache.lucene.util.automaton.Transition;
 
+import java.io.IOException;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.TreeMap;
+
 // TODO: 
 //   - build depth-N prefix hash?
 //   - or: longer dense skip lists than just next byte?
@@ -296,7 +295,7 @@
 
       BytesRef term;
       DocsEnum docsEnum = null;
-      DocsAndPositionsEnum docsAndPositionsEnum = null;
+      DocsEnum docsAndPositionsEnum = null;
       final TermsEnum termsEnum = termsIn.iterator(null);
       int termOffset = 0;
 
@@ -894,7 +893,7 @@
       }
 
       @Override
-      public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) {
+      public DocsEnum docsAndPositions(Bits liveDocs, DocsEnum reuse, int flags) {
         if (!hasPos) {
           return null;
         }
@@ -1445,7 +1444,7 @@
       }
 
       @Override
-      public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) {
+      public DocsEnum docsAndPositions(Bits liveDocs, DocsEnum reuse, int flags) {
         if (!hasPos) {
           return null;
         }
@@ -1698,7 +1697,7 @@
     }
   }
 
-  private final static class LowFreqDocsAndPositionsEnum extends DocsAndPositionsEnum {
+  private final static class LowFreqDocsAndPositionsEnum extends DocsEnum {
     private int[] postings;
     private final Bits liveDocs;
     private final int posMult;
@@ -1733,7 +1732,7 @@
       }
     }
 
-    public DocsAndPositionsEnum reset(int[] postings, byte[] payloadBytes) {
+    public DocsEnum reset(int[] postings, byte[] payloadBytes) {
       this.postings = postings;
       upto = 0;
       skipPositions = 0;
@@ -1803,7 +1802,9 @@
 
     @Override
     public int nextPosition() {
-      assert skipPositions > 0;
+      //assert skipPositions > 0;
+      if (skipPositions == 0)
+        return NO_MORE_POSITIONS;
       skipPositions--;
       final int pos = postings[upto++];
       if (hasOffsets) {
@@ -2031,7 +2032,7 @@
   }
 
   // TODO: specialize offsets and not
-  private final static class HighFreqDocsAndPositionsEnum extends DocsAndPositionsEnum {
+  private final static class HighFreqDocsAndPositionsEnum extends DocsEnum {
     private int[] docIDs;
     private int[] freqs;
     private int[][] positions;
@@ -2066,7 +2067,7 @@
       return liveDocs;
     }
 
-    public DocsAndPositionsEnum reset(int[] docIDs, int[] freqs, int[][] positions, byte[][][] payloads) {
+    public DocsEnum reset(int[] docIDs, int[] freqs, int[][] positions, byte[][][] payloads) {
       this.docIDs = docIDs;
       this.freqs = freqs;
       this.positions = positions;
@@ -2110,6 +2111,8 @@
 
     @Override
     public int nextPosition() {
+      if (posUpto >= curPositions.length)
+        return NO_MORE_POSITIONS;
       posUpto += posJump;
       return curPositions[posUpto];
     }
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTOrdTermsReader.java b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTOrdTermsReader.java
index 4403a30..b1c401f 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTOrdTermsReader.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTOrdTermsReader.java
@@ -17,19 +17,14 @@
  * limitations under the License.
  */
 
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.ArrayList;
-import java.util.BitSet;
-import java.util.Collections;
-import java.util.Iterator;
-import java.util.TreeMap;
-
+import org.apache.lucene.codecs.BlockTermState;
+import org.apache.lucene.codecs.CodecUtil;
+import org.apache.lucene.codecs.FieldsProducer;
+import org.apache.lucene.codecs.PostingsReaderBase;
 import org.apache.lucene.index.CorruptIndexException;
-import org.apache.lucene.index.DocsAndPositionsEnum;
 import org.apache.lucene.index.DocsEnum;
-import org.apache.lucene.index.FieldInfo.IndexOptions;
 import org.apache.lucene.index.FieldInfo;
+import org.apache.lucene.index.FieldInfo.IndexOptions;
 import org.apache.lucene.index.FieldInfos;
 import org.apache.lucene.index.IndexFileNames;
 import org.apache.lucene.index.SegmentInfo;
@@ -40,23 +35,26 @@
 import org.apache.lucene.store.ByteArrayDataInput;
 import org.apache.lucene.store.IndexInput;
 import org.apache.lucene.util.ArrayUtil;
-import org.apache.lucene.util.automaton.ByteRunAutomaton;
-import org.apache.lucene.util.automaton.CompiledAutomaton;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.RamUsageEstimator;
+import org.apache.lucene.util.automaton.ByteRunAutomaton;
+import org.apache.lucene.util.automaton.CompiledAutomaton;
 import org.apache.lucene.util.fst.BytesRefFSTEnum;
 import org.apache.lucene.util.fst.BytesRefFSTEnum.InputOutput;
 import org.apache.lucene.util.fst.FST;
 import org.apache.lucene.util.fst.Outputs;
 import org.apache.lucene.util.fst.PositiveIntOutputs;
 import org.apache.lucene.util.fst.Util;
-import org.apache.lucene.codecs.BlockTermState;
-import org.apache.lucene.codecs.FieldsProducer;
-import org.apache.lucene.codecs.PostingsReaderBase;
-import org.apache.lucene.codecs.CodecUtil;
-import org.apache.lucene.codecs.memory.FSTTermsReader.TermsReader;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.BitSet;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.TreeMap;
 
 /** 
  * FST-based terms dictionary reader.
@@ -395,12 +393,8 @@
       }
 
       @Override
-      public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) throws IOException {
-        if (!hasPositions()) {
-          return null;
-        }
-        decodeMetaData();
-        return postingsReader.docsAndPositions(fieldInfo, state, liveDocs, reuse, flags);
+      public DocsEnum docsAndPositions(Bits liveDocs, DocsEnum reuse, int flags) throws IOException {
+        return docs(liveDocs, reuse, flags);
       }
 
       // TODO: this can be achieved by making use of Util.getByOutput()
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTTermsReader.java b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTTermsReader.java
index f88ae9f..27f5a8bf 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTTermsReader.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTTermsReader.java
@@ -17,18 +17,14 @@
  * limitations under the License.
  */
 
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.BitSet;
-import java.util.Collections;
-import java.util.Iterator;
-import java.util.TreeMap;
-
+import org.apache.lucene.codecs.BlockTermState;
+import org.apache.lucene.codecs.CodecUtil;
+import org.apache.lucene.codecs.FieldsProducer;
+import org.apache.lucene.codecs.PostingsReaderBase;
 import org.apache.lucene.index.CorruptIndexException;
-import org.apache.lucene.index.DocsAndPositionsEnum;
 import org.apache.lucene.index.DocsEnum;
-import org.apache.lucene.index.FieldInfo.IndexOptions;
 import org.apache.lucene.index.FieldInfo;
+import org.apache.lucene.index.FieldInfo.IndexOptions;
 import org.apache.lucene.index.FieldInfos;
 import org.apache.lucene.index.IndexFileNames;
 import org.apache.lucene.index.SegmentInfo;
@@ -39,21 +35,24 @@
 import org.apache.lucene.store.ByteArrayDataInput;
 import org.apache.lucene.store.IndexInput;
 import org.apache.lucene.util.ArrayUtil;
-import org.apache.lucene.util.automaton.ByteRunAutomaton;
-import org.apache.lucene.util.automaton.CompiledAutomaton;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.RamUsageEstimator;
+import org.apache.lucene.util.automaton.ByteRunAutomaton;
+import org.apache.lucene.util.automaton.CompiledAutomaton;
 import org.apache.lucene.util.fst.BytesRefFSTEnum;
 import org.apache.lucene.util.fst.BytesRefFSTEnum.InputOutput;
 import org.apache.lucene.util.fst.FST;
 import org.apache.lucene.util.fst.Outputs;
 import org.apache.lucene.util.fst.Util;
-import org.apache.lucene.codecs.BlockTermState;
-import org.apache.lucene.codecs.FieldsProducer;
-import org.apache.lucene.codecs.PostingsReaderBase;
-import org.apache.lucene.codecs.CodecUtil;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.BitSet;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.TreeMap;
 
 /**
  * FST-based terms dictionary reader.
@@ -275,12 +274,8 @@
       }
 
       @Override
-      public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) throws IOException {
-        if (!hasPositions()) {
-          return null;
-        }
-        decodeMetaData();
-        return postingsReader.docsAndPositions(fieldInfo, state, liveDocs, reuse, flags);
+      public DocsEnum docsAndPositions(Bits liveDocs, DocsEnum reuse, int flags) throws IOException {
+        return docs(liveDocs, reuse, flags);
       }
 
       @Override
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryDocValuesProducer.java b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryDocValuesProducer.java
index 3438077..657f49d 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryDocValuesProducer.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryDocValuesProducer.java
@@ -25,7 +25,6 @@
 import org.apache.lucene.codecs.DocValuesProducer;
 import org.apache.lucene.index.BinaryDocValues;
 import org.apache.lucene.index.CorruptIndexException;
-import org.apache.lucene.index.DocsAndPositionsEnum;
 import org.apache.lucene.index.DocsEnum;
 import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.FieldInfos;
@@ -627,7 +626,7 @@
     }
 
     @Override
-    public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) throws IOException {
+    public DocsEnum docsAndPositions(Bits liveDocs, DocsEnum reuse, int flags) throws IOException {
       throw new UnsupportedOperationException();
     }
   }
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryPostingsFormat.java b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryPostingsFormat.java
index 728e1eb..f93d531 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryPostingsFormat.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryPostingsFormat.java
@@ -17,22 +17,13 @@
  * limitations under the License.
  */
 
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.Collections;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.SortedMap;
-import java.util.TreeMap;
-
 import org.apache.lucene.codecs.FieldsConsumer;
 import org.apache.lucene.codecs.FieldsProducer;
 import org.apache.lucene.codecs.PostingsFormat;
 import org.apache.lucene.codecs.TermStats;
-import org.apache.lucene.index.DocsAndPositionsEnum;
 import org.apache.lucene.index.DocsEnum;
-import org.apache.lucene.index.FieldInfo.IndexOptions;
 import org.apache.lucene.index.FieldInfo;
+import org.apache.lucene.index.FieldInfo.IndexOptions;
 import org.apache.lucene.index.FieldInfos;
 import org.apache.lucene.index.Fields;
 import org.apache.lucene.index.IndexFileNames;
@@ -59,6 +50,14 @@
 import org.apache.lucene.util.fst.Util;
 import org.apache.lucene.util.packed.PackedInts;
 
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.SortedMap;
+import java.util.TreeMap;
+
 // TODO: would be nice to somehow allow this to act like
 // InstantiatedIndex, by never writing to disk; ie you write
 // to this Codec in RAM only and then when you open a reader
@@ -303,7 +302,6 @@
           long sumTotalTermFreq = 0;
           long sumDocFreq = 0;
           DocsEnum docsEnum = null;
-          DocsAndPositionsEnum posEnum = null;
           int enumFlags;
 
           IndexOptions indexOptions = fieldInfo.getIndexOptions();
@@ -318,15 +316,15 @@
             enumFlags = DocsEnum.FLAG_FREQS;
           } else if (writeOffsets == false) {
             if (writePayloads) {
-              enumFlags = DocsAndPositionsEnum.FLAG_PAYLOADS;
+              enumFlags = DocsEnum.FLAG_PAYLOADS;
             } else {
               enumFlags = 0;
             }
           } else {
             if (writePayloads) {
-              enumFlags = DocsAndPositionsEnum.FLAG_PAYLOADS | DocsAndPositionsEnum.FLAG_OFFSETS;
+              enumFlags = DocsEnum.FLAG_PAYLOADS | DocsEnum.FLAG_OFFSETS;
             } else {
-              enumFlags = DocsAndPositionsEnum.FLAG_OFFSETS;
+              enumFlags = DocsEnum.FLAG_OFFSETS;
             }
           }
 
@@ -338,11 +336,9 @@
             termsWriter.postingsWriter.reset();
 
             if (writePositions) {
-              posEnum = termsEnum.docsAndPositions(null, posEnum, enumFlags);
-              docsEnum = posEnum;
+              docsEnum = termsEnum.docsAndPositions(null, docsEnum, enumFlags);
             } else {
               docsEnum = termsEnum.docs(null, docsEnum, enumFlags);
-              posEnum = null;
             }
 
             int docFreq = 0;
@@ -366,13 +362,13 @@
               termsWriter.postingsWriter.startDoc(docID, freq);
               if (writePositions) {
                 for (int i=0;i<freq;i++) {
-                  int pos = posEnum.nextPosition();
-                  BytesRef payload = writePayloads ? posEnum.getPayload() : null;
+                  int pos = docsEnum.nextPosition();
+                  BytesRef payload = writePayloads ? docsEnum.getPayload() : null;
                   int startOffset;
                   int endOffset;
                   if (writeOffsets) {
-                    startOffset = posEnum.startOffset();
-                    endOffset = posEnum.endOffset();
+                    startOffset = docsEnum.startOffset();
+                    endOffset = docsEnum.endOffset();
                   } else {
                     startOffset = -1;
                     endOffset = -1;
@@ -539,7 +535,7 @@
     }
   }
 
-  private final static class FSTDocsAndPositionsEnum extends DocsAndPositionsEnum {
+  private final static class FSTDocsAndPositionsEnum extends DocsEnum {
     private final boolean storePayloads;
     private byte[] buffer = new byte[16];
     private final ByteArrayDataInput in = new ByteArrayDataInput(buffer);
@@ -653,7 +649,8 @@
     @Override
     public int nextPosition() {
       //System.out.println("    nextPos storePayloads=" + storePayloads + " this=" + this);
-      assert posPending > 0;
+      if (posPending == 0)
+        return NO_MORE_POSITIONS;
       posPending--;
       if (!storePayloads) {
         pos += in.readVInt();
@@ -810,7 +807,7 @@
     }
 
     @Override
-    public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) {
+    public DocsEnum docsAndPositions(Bits liveDocs, DocsEnum reuse, int flags) {
 
       boolean hasOffsets = field.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0;
       if (field.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) < 0) {
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/pulsing/PulsingPostingsReader.java b/lucene/codecs/src/java/org/apache/lucene/codecs/pulsing/PulsingPostingsReader.java
index af85c4a..ee0f843 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/pulsing/PulsingPostingsReader.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/pulsing/PulsingPostingsReader.java
@@ -17,19 +17,13 @@
  * limitations under the License.
  */
 
-import java.io.IOException;
-import java.util.IdentityHashMap;
-import java.util.Map;
-import java.util.TreeMap;
-
 import org.apache.lucene.codecs.BlockTermState;
 import org.apache.lucene.codecs.CodecUtil;
 import org.apache.lucene.codecs.PostingsReaderBase;
-import org.apache.lucene.index.DocsAndPositionsEnum;
 import org.apache.lucene.index.DocsEnum;
-import org.apache.lucene.index.IndexFileNames;
 import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.FieldInfo.IndexOptions;
+import org.apache.lucene.index.IndexFileNames;
 import org.apache.lucene.index.SegmentReadState;
 import org.apache.lucene.index.TermState;
 import org.apache.lucene.store.ByteArrayDataInput;
@@ -43,6 +37,11 @@
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.IOUtils;
 
+import java.io.IOException;
+import java.util.IdentityHashMap;
+import java.util.Map;
+import java.util.TreeMap;
+
 /** Concrete class that reads the current doc/freq/skip
  *  postings format 
  *  @lucene.experimental */
@@ -230,7 +229,7 @@
   }
 
   @Override
-  public DocsAndPositionsEnum docsAndPositions(FieldInfo field, BlockTermState _termState, Bits liveDocs, DocsAndPositionsEnum reuse,
+  public DocsEnum docsAndPositions(FieldInfo field, BlockTermState _termState, Bits liveDocs, DocsEnum reuse,
                                                int flags) throws IOException {
 
     final PulsingTermState termState = (PulsingTermState) _termState;
@@ -257,7 +256,7 @@
       return postings.reset(liveDocs, termState);
     } else {
       if (reuse instanceof PulsingDocsAndPositionsEnum) {
-        DocsAndPositionsEnum wrapped = wrappedPostingsReader.docsAndPositions(field, termState.wrappedTermState, liveDocs, (DocsAndPositionsEnum) getOther(reuse),
+        DocsEnum wrapped = wrappedPostingsReader.docsAndPositions(field, termState.wrappedTermState, liveDocs, getOther(reuse),
                                                                               flags);
         setOther(wrapped, reuse); // wrapped.other = reuse
         return wrapped;
@@ -389,7 +388,7 @@
     }
   }
 
-  private static class PulsingDocsAndPositionsEnum extends DocsAndPositionsEnum {
+  private static class PulsingDocsAndPositionsEnum extends DocsEnum {
     private byte[] postingsBytes;
     private final ByteArrayDataInput postings = new ByteArrayDataInput();
     private final boolean storePayloads;
@@ -494,7 +493,9 @@
     public int nextPosition() throws IOException {
       //System.out.println("PR d&p nextPosition posPending=" + posPending + " vs freq=" + freq);
       
-      assert posPending > 0;
+      //assert posPending > 0;
+      if (posPending == 0)
+        return NO_MORE_POSITIONS;
       posPending--;
 
       if (storePayloads) {
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/pulsing/PulsingPostingsWriter.java b/lucene/codecs/src/java/org/apache/lucene/codecs/pulsing/PulsingPostingsWriter.java
index 4a3c214..01ddcf1 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/pulsing/PulsingPostingsWriter.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/pulsing/PulsingPostingsWriter.java
@@ -24,7 +24,6 @@
 import org.apache.lucene.codecs.BlockTermState;
 import org.apache.lucene.codecs.CodecUtil;
 import org.apache.lucene.codecs.PostingsWriterBase;
-import org.apache.lucene.index.DocsAndPositionsEnum;
 import org.apache.lucene.index.DocsEnum;
 import org.apache.lucene.index.FieldInfo.IndexOptions;
 import org.apache.lucene.index.FieldInfo;
@@ -74,7 +73,6 @@
 
   // Reused by writeTerm:
   private DocsEnum docsEnum;
-  private DocsAndPositionsEnum posEnum;
   private int enumFlags;
 
   private final RAMOutputStream buffer = new RAMOutputStream();
@@ -157,13 +155,13 @@
         posCount++;
       }
     } else {
-      posEnum = termsEnum.docsAndPositions(null, posEnum, enumFlags);
-      assert posEnum != null;
+      docsEnum = termsEnum.docsAndPositions(null, docsEnum, enumFlags);
+      assert docsEnum != null;
       while (posCount <= maxPositions) {
-        if (posEnum.nextDoc() == DocsEnum.NO_MORE_DOCS) {
+        if (docsEnum.nextDoc() == DocsEnum.NO_MORE_DOCS) {
           break;
         }
-        posCount += posEnum.freq();
+        posCount += docsEnum.freq();
       }
     }
 
@@ -187,8 +185,7 @@
       if (fieldHasPositions == false) {
         docsEnum = termsEnum.docs(null, docsEnum, enumFlags);
       } else {
-        posEnum = termsEnum.docsAndPositions(null, posEnum, enumFlags);
-        docsEnum = posEnum;
+        docsEnum = termsEnum.docsAndPositions(null, docsEnum, enumFlags);
       }
       assert docsEnum != null;
 
@@ -235,13 +232,13 @@
             int lastPos = 0;
             int lastOffset = 0;
             for(int posIDX=0;posIDX<freq;posIDX++) {
-              int pos = posEnum.nextPosition();
+              int pos = docsEnum.nextPosition();
               int posDelta = pos - lastPos;
               lastPos = pos;
               int payloadLength;
               BytesRef payload;
               if (fieldHasPayloads) {
-                payload = posEnum.getPayload();
+                payload = docsEnum.getPayload();
                 payloadLength = payload == null ? 0 : payload.length;
                 if (payloadLength != lastPayloadLength) {
                   buffer.writeVInt((posDelta << 1)|1);
@@ -257,8 +254,8 @@
               }
 
               if (fieldHasOffsets) {
-                int startOffset = posEnum.startOffset();
-                int endOffset = posEnum.endOffset();
+                int startOffset = docsEnum.startOffset();
+                int endOffset = docsEnum.endOffset();
                 int offsetDelta = startOffset - lastOffset;
                 int offsetLength = endOffset - startOffset;
                 if (offsetLength != lastOffsetLength) {
@@ -318,15 +315,15 @@
       enumFlags = DocsEnum.FLAG_FREQS;
     } else if (fieldHasOffsets == false) {
       if (fieldHasPayloads) {
-        enumFlags = DocsAndPositionsEnum.FLAG_PAYLOADS;
+        enumFlags = DocsEnum.FLAG_PAYLOADS;
       } else {
         enumFlags = 0;
       }
     } else {
       if (fieldHasPayloads) {
-        enumFlags = DocsAndPositionsEnum.FLAG_PAYLOADS | DocsAndPositionsEnum.FLAG_OFFSETS;
+        enumFlags = DocsEnum.FLAG_PAYLOADS | DocsEnum.FLAG_OFFSETS;
       } else {
-        enumFlags = DocsAndPositionsEnum.FLAG_OFFSETS;
+        enumFlags = DocsEnum.FLAG_OFFSETS;
       }
     }
     return 0;
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/sep/SepPostingsReader.java b/lucene/codecs/src/java/org/apache/lucene/codecs/sep/SepPostingsReader.java
index d239f15..5bca591 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/sep/SepPostingsReader.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/sep/SepPostingsReader.java
@@ -17,15 +17,12 @@
  * limitations under the License.
  */
 
-import java.io.IOException;
-
 import org.apache.lucene.codecs.BlockTermState;
 import org.apache.lucene.codecs.CodecUtil;
 import org.apache.lucene.codecs.PostingsReaderBase;
-import org.apache.lucene.index.DocsAndPositionsEnum;
 import org.apache.lucene.index.DocsEnum;
-import org.apache.lucene.index.FieldInfo.IndexOptions;
 import org.apache.lucene.index.FieldInfo;
+import org.apache.lucene.index.FieldInfo.IndexOptions;
 import org.apache.lucene.index.FieldInfos;
 import org.apache.lucene.index.IndexFileNames;
 import org.apache.lucene.index.SegmentInfo;
@@ -40,6 +37,8 @@
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.IOUtils;
 
+import java.io.IOException;
+
 /** Concrete class that reads the current doc/freq/skip
  *  postings format.    
  *
@@ -228,8 +227,8 @@
   }
 
   @Override
-  public DocsAndPositionsEnum docsAndPositions(FieldInfo fieldInfo, BlockTermState _termState, Bits liveDocs,
-                                               DocsAndPositionsEnum reuse, int flags)
+  public DocsEnum docsAndPositions(FieldInfo fieldInfo, BlockTermState _termState, Bits liveDocs,
+                                               DocsEnum reuse, int flags)
     throws IOException {
 
     assert fieldInfo.getIndexOptions() == IndexOptions.DOCS_AND_FREQS_AND_POSITIONS;
@@ -421,7 +420,7 @@
     }
   }
 
-  class SepDocsAndPositionsEnum extends DocsAndPositionsEnum {
+  class SepDocsAndPositionsEnum extends DocsEnum {
     int docFreq;
     int doc = -1;
     int accum;
@@ -615,6 +614,11 @@
 
     @Override
     public int nextPosition() throws IOException {
+
+      //nocommit, not sure about this one
+      if (pendingPosCount == 0)
+        return NO_MORE_POSITIONS;
+
       if (posSeekPending) {
         posIndex.seek(posReader);
         payloadIn.seek(payloadFP);
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextFieldsReader.java b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextFieldsReader.java
index 4b25a24..e95319b 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextFieldsReader.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextFieldsReader.java
@@ -17,18 +17,10 @@
  * limitations under the License.
  */
 
-import java.io.IOException;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.TreeMap;
-
 import org.apache.lucene.codecs.FieldsProducer;
-import org.apache.lucene.index.DocsAndPositionsEnum;
 import org.apache.lucene.index.DocsEnum;
-import org.apache.lucene.index.FieldInfo.IndexOptions;
 import org.apache.lucene.index.FieldInfo;
+import org.apache.lucene.index.FieldInfo.IndexOptions;
 import org.apache.lucene.index.FieldInfos;
 import org.apache.lucene.index.SegmentReadState;
 import org.apache.lucene.index.Terms;
@@ -50,6 +42,14 @@
 import org.apache.lucene.util.fst.PositiveIntOutputs;
 import org.apache.lucene.util.fst.Util;
 
+import java.io.IOException;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.TreeMap;
+
 class SimpleTextFieldsReader extends FieldsProducer {
   private final TreeMap<String,Long> fields;
   private final IndexInput in;
@@ -192,17 +192,17 @@
  
     @Override
     public DocsEnum docs(Bits liveDocs, DocsEnum reuse, int flags) throws IOException {
-      SimpleTextDocsEnum docsEnum;
+      SimpleTextDocsAndPositionsEnum docsEnum;
       if (reuse != null && reuse instanceof SimpleTextDocsEnum && ((SimpleTextDocsEnum) reuse).canReuse(SimpleTextFieldsReader.this.in)) {
-        docsEnum = (SimpleTextDocsEnum) reuse;
+        docsEnum = (SimpleTextDocsAndPositionsEnum) reuse;
       } else {
-        docsEnum = new SimpleTextDocsEnum();
+        docsEnum = new SimpleTextDocsAndPositionsEnum();
       }
-      return docsEnum.reset(docsStart, liveDocs, indexOptions == IndexOptions.DOCS_ONLY, docFreq);
+      return docsEnum.reset(docsStart, liveDocs, indexOptions, docFreq);
     }
 
     @Override
-    public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) throws IOException {
+    public DocsEnum docsAndPositions(Bits liveDocs, DocsEnum reuse, int flags) throws IOException {
 
       if (indexOptions.compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) < 0) {
         // Positions were not indexed
@@ -318,7 +318,7 @@
     }
   }
 
-  private class SimpleTextDocsAndPositionsEnum extends DocsAndPositionsEnum {
+  private class SimpleTextDocsAndPositionsEnum extends DocsEnum {
     private final IndexInput inStart;
     private final IndexInput in;
     private int docID = -1;
@@ -334,6 +334,7 @@
     private boolean readPositions;
     private int startOffset;
     private int endOffset;
+    private int posPending;
     private int cost;
 
     public SimpleTextDocsAndPositionsEnum() {
@@ -387,10 +388,12 @@
           UnicodeUtil.UTF8toUTF16(scratch.bytes, scratch.offset+DOC.length, scratch.length-DOC.length, scratchUTF16);
           docID = ArrayUtil.parseInt(scratchUTF16.chars, 0, scratchUTF16.length);
           tf = 0;
+          posPending = 0;
           first = false;
         } else if (StringHelper.startsWith(scratch, FREQ)) {
           UnicodeUtil.UTF8toUTF16(scratch.bytes, scratch.offset+FREQ.length, scratch.length-FREQ.length, scratchUTF16);
           tf = ArrayUtil.parseInt(scratchUTF16.chars, 0, scratchUTF16.length);
+          posPending = tf;
           posStart = in.getFilePointer();
         } else if (StringHelper.startsWith(scratch, POS)) {
           // skip
@@ -421,6 +424,9 @@
     @Override
     public int nextPosition() throws IOException {
       final int pos;
+      if (posPending == 0)
+        return NO_MORE_POSITIONS;
+
       if (readPositions) {
         SimpleTextUtil.readLine(in, scratch);
         assert StringHelper.startsWith(scratch, POS): "got line=" + scratch.utf8ToString();
@@ -455,6 +461,7 @@
         payload = null;
         in.seek(fp);
       }
+      posPending--;
       return pos;
     }
 
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextFieldsWriter.java b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextFieldsWriter.java
index 2c30d0e..b4f74c2 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextFieldsWriter.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextFieldsWriter.java
@@ -17,11 +17,7 @@
  * limitations under the License.
  */
 
-import java.io.Closeable;
-import java.io.IOException;
-
 import org.apache.lucene.codecs.FieldsConsumer;
-import org.apache.lucene.index.DocsAndPositionsEnum;
 import org.apache.lucene.index.DocsEnum;
 import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.FieldInfos;
@@ -33,6 +29,9 @@
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.IOUtils;
 
+import java.io.Closeable;
+import java.io.IOException;
+
 class SimpleTextFieldsWriter extends FieldsConsumer implements Closeable {
   
   private final IndexOutput out;
@@ -92,10 +91,10 @@
       if (hasPositions) {
         
         if (hasPayloads) {
-          flags = flags | DocsAndPositionsEnum.FLAG_PAYLOADS;
+          flags = flags | DocsEnum.FLAG_PAYLOADS;
         }
         if (hasOffsets) {
-          flags = flags | DocsAndPositionsEnum.FLAG_OFFSETS;
+          flags = flags | DocsEnum.FLAG_OFFSETS;
         }
       } else {
         if (hasFreqs) {
@@ -104,7 +103,6 @@
       }
 
       TermsEnum termsEnum = terms.iterator(null);
-      DocsAndPositionsEnum posEnum = null;
       DocsEnum docsEnum = null;
 
       // for each term in field
@@ -115,8 +113,7 @@
         }
 
         if (hasPositions) {
-          posEnum = termsEnum.docsAndPositions(null, posEnum, flags);
-          docsEnum = posEnum;
+          docsEnum = termsEnum.docsAndPositions(null, docsEnum, flags);
         } else {
           docsEnum = termsEnum.docs(null, docsEnum, flags);
         }
@@ -165,15 +162,15 @@
 
               // for each pos in field+term+doc
               for(int i=0;i<freq;i++) {
-                int position = posEnum.nextPosition();
+                int position = docsEnum.nextPosition();
 
                 write(POS);
                 write(Integer.toString(position));
                 newline();
 
                 if (hasOffsets) {
-                  int startOffset = posEnum.startOffset();
-                  int endOffset = posEnum.endOffset();
+                  int startOffset = docsEnum.startOffset();
+                  int endOffset = docsEnum.endOffset();
                   assert endOffset >= startOffset;
                   assert startOffset >= lastStartOffset: "startOffset=" + startOffset + " lastStartOffset=" + lastStartOffset;
                   lastStartOffset = startOffset;
@@ -185,7 +182,7 @@
                   newline();
                 }
 
-                BytesRef payload = posEnum.getPayload();
+                BytesRef payload = docsEnum.getPayload();
 
                 if (payload != null && payload.length > 0) {
                   assert payload.length != 0;
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextTermVectorsReader.java b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextTermVectorsReader.java
index d415177..53041ec 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextTermVectorsReader.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextTermVectorsReader.java
@@ -17,15 +17,7 @@
  * limitations under the License.
  */
 
-import java.io.IOException;
-import java.util.Collections;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.SortedMap;
-import java.util.TreeMap;
-
 import org.apache.lucene.codecs.TermVectorsReader;
-import org.apache.lucene.index.DocsAndPositionsEnum;
 import org.apache.lucene.index.DocsEnum;
 import org.apache.lucene.index.Fields;
 import org.apache.lucene.index.IndexFileNames;
@@ -43,7 +35,30 @@
 import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.StringHelper;
 import org.apache.lucene.util.UnicodeUtil;
-import static org.apache.lucene.codecs.simpletext.SimpleTextTermVectorsWriter.*;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.SortedMap;
+import java.util.TreeMap;
+
+import static org.apache.lucene.codecs.simpletext.SimpleTextTermVectorsWriter.DOC;
+import static org.apache.lucene.codecs.simpletext.SimpleTextTermVectorsWriter.END;
+import static org.apache.lucene.codecs.simpletext.SimpleTextTermVectorsWriter.ENDOFFSET;
+import static org.apache.lucene.codecs.simpletext.SimpleTextTermVectorsWriter.FIELD;
+import static org.apache.lucene.codecs.simpletext.SimpleTextTermVectorsWriter.FIELDNAME;
+import static org.apache.lucene.codecs.simpletext.SimpleTextTermVectorsWriter.FIELDOFFSETS;
+import static org.apache.lucene.codecs.simpletext.SimpleTextTermVectorsWriter.FIELDPAYLOADS;
+import static org.apache.lucene.codecs.simpletext.SimpleTextTermVectorsWriter.FIELDPOSITIONS;
+import static org.apache.lucene.codecs.simpletext.SimpleTextTermVectorsWriter.FIELDTERMCOUNT;
+import static org.apache.lucene.codecs.simpletext.SimpleTextTermVectorsWriter.NUMFIELDS;
+import static org.apache.lucene.codecs.simpletext.SimpleTextTermVectorsWriter.PAYLOAD;
+import static org.apache.lucene.codecs.simpletext.SimpleTextTermVectorsWriter.POSITION;
+import static org.apache.lucene.codecs.simpletext.SimpleTextTermVectorsWriter.STARTOFFSET;
+import static org.apache.lucene.codecs.simpletext.SimpleTextTermVectorsWriter.TERMFREQ;
+import static org.apache.lucene.codecs.simpletext.SimpleTextTermVectorsWriter.TERMTEXT;
+import static org.apache.lucene.codecs.simpletext.SimpleTextTermVectorsWriter.VECTORS_EXTENSION;
 
 /**
  * Reads plain-text term vectors.
@@ -383,7 +398,7 @@
     }
 
     @Override
-    public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) throws IOException {
+    public DocsEnum docsAndPositions(Bits liveDocs, DocsEnum reuse, int flags) throws IOException {
       SimpleTVPostings postings = current.getValue();
       if (postings.positions == null && postings.startOffsets == null) {
         return null;
@@ -441,7 +456,7 @@
     }
   }
   
-  private static class SimpleTVDocsAndPositionsEnum extends DocsAndPositionsEnum {
+  private static class SimpleTVDocsAndPositionsEnum extends DocsEnum {
     private boolean didNext;
     private int doc = -1;
     private int nextPos;
@@ -499,9 +514,11 @@
 
     @Override
     public int nextPosition() {
-      assert (positions != null && nextPos < positions.length) ||
-        startOffsets != null && nextPos < startOffsets.length;
+      //assert (positions != null && nextPos < positions.length) ||
+      //  startOffsets != null && nextPos < startOffsets.length;
       if (positions != null) {
+        if (nextPos >= positions.length)
+          return NO_MORE_POSITIONS;
         return positions[nextPos++];
       } else {
         nextPos++;
diff --git a/lucene/codecs/src/test/org/apache/lucene/codecs/pulsing/TestPulsingReuse.java b/lucene/codecs/src/test/org/apache/lucene/codecs/pulsing/TestPulsingReuse.java
index d849951..5d56cc8 100644
--- a/lucene/codecs/src/test/org/apache/lucene/codecs/pulsing/TestPulsingReuse.java
+++ b/lucene/codecs/src/test/org/apache/lucene/codecs/pulsing/TestPulsingReuse.java
@@ -29,7 +29,7 @@
 import org.apache.lucene.index.AtomicReader;
 import org.apache.lucene.index.CheckIndex;
 import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.index.DocsEnum;
 import org.apache.lucene.index.DocsEnum;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.TermsEnum;
@@ -67,7 +67,7 @@
     assertEquals(2, allEnums.size());
     
     allEnums.clear();
-    DocsAndPositionsEnum posReuse = null;
+    DocsEnum posReuse = null;
     te = segment.terms("foo").iterator(null);
     while (te.next() != null) {
       posReuse = te.docsAndPositions(null, posReuse);
@@ -108,7 +108,7 @@
     assertEquals(4, allEnums.size());
     
     allEnums.clear();
-    DocsAndPositionsEnum posReuse = null;
+    DocsEnum posReuse = null;
     te = segment.terms("foo").iterator(null);
     while (te.next() != null) {
       posReuse = te.docsAndPositions(null, posReuse);
diff --git a/lucene/core/src/java/org/apache/lucene/analysis/Token.java b/lucene/core/src/java/org/apache/lucene/analysis/Token.java
index 22b5676..92db29f 100644
--- a/lucene/core/src/java/org/apache/lucene/analysis/Token.java
+++ b/lucene/core/src/java/org/apache/lucene/analysis/Token.java
@@ -24,7 +24,7 @@
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
 import org.apache.lucene.analysis.tokenattributes.PositionLengthAttribute;
 import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
-import org.apache.lucene.index.DocsAndPositionsEnum; // for javadoc
+import org.apache.lucene.index.DocsEnum; // for javadoc
 import org.apache.lucene.util.Attribute;
 import org.apache.lucene.util.AttributeSource;
 import org.apache.lucene.util.AttributeImpl;
@@ -47,7 +47,7 @@
   with type "eos".  The default token type is "word".  
   <p>
   A Token can optionally have metadata (a.k.a. payload) in the form of a variable
-  length byte array. Use {@link DocsAndPositionsEnum#getPayload()} to retrieve the 
+  length byte array. Use {@link DocsEnum#getPayload()} to retrieve the 
   payloads from the index.
   
   <br><br>
diff --git a/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/PayloadAttribute.java b/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/PayloadAttribute.java
index 8793c94..0029ccf 100644
--- a/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/PayloadAttribute.java
+++ b/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/PayloadAttribute.java
@@ -17,7 +17,7 @@
  * limitations under the License.
  */
 
-import org.apache.lucene.index.DocsAndPositionsEnum; // javadocs
+import org.apache.lucene.index.DocsEnum; // javadocs
 import org.apache.lucene.util.Attribute;
 import org.apache.lucene.util.BytesRef;
 
@@ -33,7 +33,7 @@
  * best to use the minimum number of bytes necessary. Some codec implementations
  * may optimize payload storage when all payloads have the same length.
  * 
- * @see DocsAndPositionsEnum
+ * @see DocsEnum
  */
 public interface PayloadAttribute extends Attribute {
   /**
diff --git a/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/PositionIncrementAttribute.java b/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/PositionIncrementAttribute.java
index 9afd2f9..a7a7cd9 100644
--- a/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/PositionIncrementAttribute.java
+++ b/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/PositionIncrementAttribute.java
@@ -43,7 +43,7 @@
  *
  * </ul>
  * 
- * @see org.apache.lucene.index.DocsAndPositionsEnum
+ * @see org.apache.lucene.index.DocsEnum
  */
 public interface PositionIncrementAttribute extends Attribute {
   /** Set the position increment. The default value is one.
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/BlockTreeTermsReader.java b/lucene/core/src/java/org/apache/lucene/codecs/BlockTreeTermsReader.java
index 0b41ac3..d05fd17 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/BlockTreeTermsReader.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/BlockTreeTermsReader.java
@@ -27,7 +27,7 @@
 import java.util.TreeMap;
 
 import org.apache.lucene.index.CorruptIndexException;
-import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.index.DocsEnum;
 import org.apache.lucene.index.DocsEnum;
 import org.apache.lucene.index.FieldInfo.IndexOptions;
 import org.apache.lucene.index.FieldInfo;
@@ -984,7 +984,7 @@
       }
 
       @Override
-      public DocsAndPositionsEnum docsAndPositions(Bits skipDocs, DocsAndPositionsEnum reuse, int flags) throws IOException {
+      public DocsEnum docsAndPositions(Bits skipDocs, DocsEnum reuse, int flags) throws IOException {
         if (fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) < 0) {
           // Positions were not indexed:
           return null;
@@ -2200,13 +2200,11 @@
       }
 
       @Override
-      public DocsAndPositionsEnum docsAndPositions(Bits skipDocs, DocsAndPositionsEnum reuse, int flags) throws IOException {
+      public DocsEnum docsAndPositions(Bits skipDocs, DocsEnum reuse, int flags) throws IOException {
         if (fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) < 0) {
           // Positions were not indexed:
           return null;
         }
-
-        assert !eof;
         currentFrame.decodeMetaData();
         return postingsReader.docsAndPositions(fieldInfo, currentFrame.state, skipDocs, reuse, flags);
       }
diff --git a/lucene/core/src/java/org/apache/lucene/index/MappingMultiDocsAndPositionsEnum.java b/lucene/core/src/java/org/apache/lucene/codecs/MappingMultiDocsEnum.java
similarity index 75%
rename from lucene/core/src/java/org/apache/lucene/index/MappingMultiDocsAndPositionsEnum.java
rename to lucene/core/src/java/org/apache/lucene/codecs/MappingMultiDocsEnum.java
index bcc3735..19e01b8 100644
--- a/lucene/core/src/java/org/apache/lucene/index/MappingMultiDocsAndPositionsEnum.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/MappingMultiDocsEnum.java
@@ -1,4 +1,4 @@
-package org.apache.lucene.index;
+package org.apache.lucene.codecs;
 
 /*
  * Licensed to the Apache Software Foundation (ASF) under one or more
@@ -17,8 +17,11 @@
  * limitations under the License.
  */
 
+import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.MergeState;
+import org.apache.lucene.index.MultiDocsEnum;
+import org.apache.lucene.index.MultiDocsEnum.EnumWithSlice;
 import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.index.MultiDocsAndPositionsEnum.EnumWithSlice;
 
 import java.io.IOException;
 
@@ -29,31 +32,34 @@
  * @lucene.experimental
  */
 
-final class MappingMultiDocsAndPositionsEnum extends DocsAndPositionsEnum {
-  private MultiDocsAndPositionsEnum.EnumWithSlice[] subs;
+public final class MappingMultiDocsEnum extends DocsEnum {
+  private MultiDocsEnum.EnumWithSlice[] subs;
   int numSubs;
   int upto;
   MergeState.DocMap currentMap;
-  DocsAndPositionsEnum current;
+  DocsEnum current;
   int currentBase;
   int doc = -1;
   private MergeState mergeState;
-  MultiDocsAndPositionsEnum multiDocsAndPositionsEnum;
 
   /** Sole constructor. */
-  public MappingMultiDocsAndPositionsEnum(MergeState mergeState) {
-    this.mergeState = mergeState;
+  public MappingMultiDocsEnum() {
   }
 
-  MappingMultiDocsAndPositionsEnum reset(MultiDocsAndPositionsEnum postingsEnum) {
-    this.numSubs = postingsEnum.getNumSubs();
-    this.subs = postingsEnum.getSubs();
+  MappingMultiDocsEnum reset(MultiDocsEnum docsEnum) {
+    this.numSubs = docsEnum.getNumSubs();
+    this.subs = docsEnum.getSubs();
     upto = -1;
     current = null;
-    this.multiDocsAndPositionsEnum = postingsEnum;
     return this;
   }
 
+  /** Sets the {@link MergeState}, which is used to re-map
+   *  document IDs. */
+  public void setMergeState(MergeState mergeState) {
+    this.mergeState = mergeState;
+  }
+  
   /** How many sub-readers we are merging.
    *  @see #getSubs */
   public int getNumSubs() {
@@ -79,43 +85,7 @@
   public int advance(int target) {
     throw new UnsupportedOperationException();
   }
-
-  @Override
-  public int nextDoc() throws IOException {
-    while(true) {
-      if (current == null) {
-        if (upto == numSubs-1) {
-          return this.doc = NO_MORE_DOCS;
-        } else {
-          upto++;
-          final int reader = subs[upto].slice.readerIndex;
-          current = subs[upto].docsAndPositionsEnum;
-          currentBase = mergeState.docBase[reader];
-          currentMap = mergeState.docMaps[reader];
-        }
-      }
-
-      int doc = current.nextDoc();
-      if (doc != NO_MORE_DOCS) {
-
-        mergeState.checkAbortCount++;
-        if (mergeState.checkAbortCount > 60000) {
-          mergeState.checkAbort.work(mergeState.checkAbortCount/5.0);
-          mergeState.checkAbortCount = 0;
-        }
-
-        // compact deletions
-        doc = currentMap.get(doc);
-        if (doc == -1) {
-          continue;
-        }
-        return this.doc = currentBase + doc;
-      } else {
-        current = null;
-      }
-    }
-  }
-
+  
   @Override
   public int nextPosition() throws IOException {
     return current.nextPosition();
@@ -137,10 +107,40 @@
   }
 
   @Override
+  public int nextDoc() throws IOException {
+    while(true) {
+      if (current == null) {
+        if (upto == numSubs-1) {
+          return this.doc = NO_MORE_DOCS;
+        } else {
+          upto++;
+          final int reader = subs[upto].slice.readerIndex;
+          current = subs[upto].docsEnum;
+          currentBase = mergeState.docBase[reader];
+          currentMap = mergeState.docMaps[reader];
+          assert currentMap.maxDoc() == subs[upto].slice.length: "readerIndex=" + reader + " subs.len=" + subs.length + " len1=" + currentMap.maxDoc() + " vs " + subs[upto].slice.length;
+        }
+      }
+
+      int doc = current.nextDoc();
+      if (doc != NO_MORE_DOCS) {
+        // compact deletions
+        doc = currentMap.get(doc);
+        if (doc == -1) {
+          continue;
+        }
+        return this.doc = currentBase + doc;
+      } else {
+        current = null;
+      }
+    }
+  }
+
+  @Override
   public long cost() {
     long cost = 0;
     for (EnumWithSlice enumWithSlice : subs) {
-      cost += enumWithSlice.docsAndPositionsEnum.cost();
+      cost += enumWithSlice.docsEnum.cost();
     }
     return cost;
   }
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/PostingsReaderBase.java b/lucene/core/src/java/org/apache/lucene/codecs/PostingsReaderBase.java
index d8471b9..3b33533 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/PostingsReaderBase.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/PostingsReaderBase.java
@@ -21,7 +21,7 @@
 import java.io.Closeable;
 
 import org.apache.lucene.index.DocsEnum;
-import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.index.DocsEnum;
 import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.store.IndexInput;
 import org.apache.lucene.store.DataInput;
@@ -30,7 +30,7 @@
 /** The core terms dictionaries (BlockTermsReader,
  *  BlockTreeTermsReader) interact with a single instance
  *  of this class to manage creation of {@link DocsEnum} and
- *  {@link DocsAndPositionsEnum} instances.  It provides an
+ *  {@link DocsEnum} instances.  It provides an
  *  IndexInput (termsIn) where this class may read any
  *  previously stored data that it had written in its
  *  corresponding {@link PostingsWriterBase} at indexing
@@ -66,8 +66,9 @@
 
   /** Must fully consume state, since after this call that
    *  TermState may be reused. */
-  public abstract DocsAndPositionsEnum docsAndPositions(FieldInfo fieldInfo, BlockTermState state, Bits skipDocs, DocsAndPositionsEnum reuse,
+  public abstract DocsEnum docsAndPositions(FieldInfo fieldInfo, BlockTermState state, Bits skipDocs, DocsEnum reuse,
                                                         int flags) throws IOException;
+  // nocommit this still has the distinction - no need to remove this as long as we get the interface straight?
 
   /** Returns approximate RAM bytes used */
   public abstract long ramBytesUsed();
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/PostingsWriterBase.java b/lucene/core/src/java/org/apache/lucene/codecs/PostingsWriterBase.java
index 645ee01..7797300 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/PostingsWriterBase.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/PostingsWriterBase.java
@@ -20,7 +20,6 @@
 import java.io.Closeable;
 import java.io.IOException;
 
-import org.apache.lucene.index.DocsAndPositionsEnum; // javadocs
 import org.apache.lucene.index.DocsEnum; // javadocs
 import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.TermsEnum;
@@ -52,8 +51,8 @@
   public abstract void init(IndexOutput termsOut) throws IOException;
 
   /** Write all postings for one term; use the provided
-   *  {@link TermsEnum} to pull a {@link DocsEnum} or {@link
-   *  DocsAndPositionsEnum}.  This method should not
+   *  {@link TermsEnum} to pull a {@link DocsEnum}.
+   *  This method should not
    *  re-position the {@code TermsEnum}!  It is already
    *  positioned on the term that should be written.  This
    *  method must set the bit in the provided {@link
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/PushPostingsWriterBase.java b/lucene/core/src/java/org/apache/lucene/codecs/PushPostingsWriterBase.java
index a310cf6..bd25e0c 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/PushPostingsWriterBase.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/PushPostingsWriterBase.java
@@ -17,18 +17,17 @@
  * limitations under the License.
  */
 
-import java.io.IOException;
-
-import org.apache.lucene.index.DocsAndPositionsEnum;
 import org.apache.lucene.index.DocsEnum;
-import org.apache.lucene.index.FieldInfo.IndexOptions;
 import org.apache.lucene.index.FieldInfo;
+import org.apache.lucene.index.FieldInfo.IndexOptions;
 import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.store.DataOutput;
 import org.apache.lucene.store.IndexOutput;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.FixedBitSet;
 
+import java.io.IOException;
+
 /**
  * Extension of {@link PostingsWriterBase}, adding a push
  * API for writing each element of the postings.  This API
@@ -45,7 +44,6 @@
 
   // Reused in writeTerm
   private DocsEnum docsEnum;
-  private DocsAndPositionsEnum posEnum;
   private int enumFlags;
 
   /** {@link FieldInfo} of current field being written. */
@@ -125,15 +123,15 @@
       enumFlags = DocsEnum.FLAG_FREQS;
     } else if (writeOffsets == false) {
       if (writePayloads) {
-        enumFlags = DocsAndPositionsEnum.FLAG_PAYLOADS;
+        enumFlags = DocsEnum.FLAG_PAYLOADS;
       } else {
         enumFlags = 0;
       }
     } else {
       if (writePayloads) {
-        enumFlags = DocsAndPositionsEnum.FLAG_PAYLOADS | DocsAndPositionsEnum.FLAG_OFFSETS;
+        enumFlags = DocsEnum.FLAG_PAYLOADS | DocsEnum.FLAG_OFFSETS;
       } else {
-        enumFlags = DocsAndPositionsEnum.FLAG_OFFSETS;
+        enumFlags = DocsEnum.FLAG_OFFSETS;
       }
     }
 
@@ -146,8 +144,7 @@
     if (writePositions == false) {
       docsEnum = termsEnum.docs(null, docsEnum, enumFlags);
     } else {
-      posEnum = termsEnum.docsAndPositions(null, posEnum, enumFlags);
-      docsEnum = posEnum;
+      docsEnum = termsEnum.docsAndPositions(null, docsEnum, enumFlags);
     }
     assert docsEnum != null;
 
@@ -171,13 +168,13 @@
 
       if (writePositions) {
         for(int i=0;i<freq;i++) {
-          int pos = posEnum.nextPosition();
-          BytesRef payload = writePayloads ? posEnum.getPayload() : null;
+          int pos = docsEnum.nextPosition();
+          BytesRef payload = writePayloads ? docsEnum.getPayload() : null;
           int startOffset;
           int endOffset;
           if (writeOffsets) {
-            startOffset = posEnum.startOffset();
-            endOffset = posEnum.endOffset();
+            startOffset = docsEnum.startOffset();
+            endOffset = docsEnum.endOffset();
           } else {
             startOffset = -1;
             endOffset = -1;
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/TermVectorsReader.java b/lucene/core/src/java/org/apache/lucene/codecs/TermVectorsReader.java
index 95472cb..0312a99 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/TermVectorsReader.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/TermVectorsReader.java
@@ -21,7 +21,7 @@
 import java.io.IOException;
 
 import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; // javadocs
-import org.apache.lucene.index.DocsAndPositionsEnum; // javadocs
+import org.apache.lucene.index.DocsEnum; // javadocs
 import org.apache.lucene.index.Fields;
 
 /**
@@ -39,7 +39,7 @@
   /** Returns term vectors for this document, or null if
    *  term vectors were not indexed. If offsets are
    *  available they are in an {@link OffsetAttribute}
-   *  available from the {@link DocsAndPositionsEnum}. */
+   *  available from the {@link DocsEnum}. */
   public abstract Fields get(int doc) throws IOException;
 
   /** Returns approximate RAM bytes used */
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/TermVectorsWriter.java b/lucene/core/src/java/org/apache/lucene/codecs/TermVectorsWriter.java
index c3a3019..c944402 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/TermVectorsWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/TermVectorsWriter.java
@@ -22,7 +22,7 @@
 import java.util.Iterator;
 
 import org.apache.lucene.index.AtomicReader;
-import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.index.DocsEnum;
 import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.FieldInfos;
 import org.apache.lucene.index.Fields;
@@ -223,7 +223,7 @@
     String lastFieldName = null;
     
     TermsEnum termsEnum = null;
-    DocsAndPositionsEnum docsAndPositionsEnum = null;
+    DocsEnum docsAndPositionsEnum = null;
     
     int fieldCount = 0;
     for(String fieldName : vectors) {
@@ -280,7 +280,7 @@
             
             final BytesRef payload = docsAndPositionsEnum.getPayload();
 
-            assert !hasPositions || pos >= 0;
+            assert !hasPositions || pos >= 0 ;
             addPosition(pos, startOffset, endOffset, payload);
           }
         }
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingTermVectorsReader.java b/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingTermVectorsReader.java
index fb466c3..f4a5d47 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingTermVectorsReader.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingTermVectorsReader.java
@@ -17,27 +17,9 @@
  * limitations under the License.
  */
 
-import static org.apache.lucene.codecs.compressing.CompressingTermVectorsWriter.BLOCK_SIZE;
-import static org.apache.lucene.codecs.compressing.CompressingTermVectorsWriter.CODEC_SFX_DAT;
-import static org.apache.lucene.codecs.compressing.CompressingTermVectorsWriter.CODEC_SFX_IDX;
-import static org.apache.lucene.codecs.compressing.CompressingTermVectorsWriter.FLAGS_BITS;
-import static org.apache.lucene.codecs.compressing.CompressingTermVectorsWriter.OFFSETS;
-import static org.apache.lucene.codecs.compressing.CompressingTermVectorsWriter.PAYLOADS;
-import static org.apache.lucene.codecs.compressing.CompressingTermVectorsWriter.POSITIONS;
-import static org.apache.lucene.codecs.compressing.CompressingTermVectorsWriter.VECTORS_EXTENSION;
-import static org.apache.lucene.codecs.compressing.CompressingTermVectorsWriter.VECTORS_INDEX_EXTENSION;
-import static org.apache.lucene.codecs.compressing.CompressingTermVectorsWriter.VERSION_CURRENT;
-import static org.apache.lucene.codecs.compressing.CompressingTermVectorsWriter.VERSION_START;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.Iterator;
-import java.util.NoSuchElementException;
-
 import org.apache.lucene.codecs.CodecUtil;
 import org.apache.lucene.codecs.TermVectorsReader;
 import org.apache.lucene.index.CorruptIndexException;
-import org.apache.lucene.index.DocsAndPositionsEnum;
 import org.apache.lucene.index.DocsEnum;
 import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.FieldInfos;
@@ -59,6 +41,23 @@
 import org.apache.lucene.util.packed.BlockPackedReaderIterator;
 import org.apache.lucene.util.packed.PackedInts;
 
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.Iterator;
+import java.util.NoSuchElementException;
+
+import static org.apache.lucene.codecs.compressing.CompressingTermVectorsWriter.BLOCK_SIZE;
+import static org.apache.lucene.codecs.compressing.CompressingTermVectorsWriter.CODEC_SFX_DAT;
+import static org.apache.lucene.codecs.compressing.CompressingTermVectorsWriter.CODEC_SFX_IDX;
+import static org.apache.lucene.codecs.compressing.CompressingTermVectorsWriter.FLAGS_BITS;
+import static org.apache.lucene.codecs.compressing.CompressingTermVectorsWriter.OFFSETS;
+import static org.apache.lucene.codecs.compressing.CompressingTermVectorsWriter.PAYLOADS;
+import static org.apache.lucene.codecs.compressing.CompressingTermVectorsWriter.POSITIONS;
+import static org.apache.lucene.codecs.compressing.CompressingTermVectorsWriter.VECTORS_EXTENSION;
+import static org.apache.lucene.codecs.compressing.CompressingTermVectorsWriter.VECTORS_INDEX_EXTENSION;
+import static org.apache.lucene.codecs.compressing.CompressingTermVectorsWriter.VERSION_CURRENT;
+import static org.apache.lucene.codecs.compressing.CompressingTermVectorsWriter.VERSION_START;
+
 
 /**
  * {@link TermVectorsReader} for {@link CompressingTermVectorsFormat}.
@@ -882,17 +881,13 @@
     }
 
     @Override
-    public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) throws IOException {
-      if (positions == null && startOffsets == null) {
-        return null;
-      }
-      // TODO: slightly sheisty
-      return (DocsAndPositionsEnum) docs(liveDocs, reuse, flags);
+    public DocsEnum docsAndPositions(Bits liveDocs, DocsEnum reuse, int flags) throws IOException {
+      return docs(liveDocs, reuse, flags);
     }
 
   }
 
-  private static class TVDocsEnum extends DocsAndPositionsEnum {
+  private static class TVDocsEnum extends DocsEnum {
 
     private Bits liveDocs;
     private int doc = -1;
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene40/Lucene40PostingsReader.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene40/Lucene40PostingsReader.java
index e73a7a2..f87b78a 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene40/Lucene40PostingsReader.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene40/Lucene40PostingsReader.java
@@ -17,16 +17,12 @@
  * limitations under the License.
  */
 
-import java.io.IOException;
-import java.util.Arrays;
-
 import org.apache.lucene.codecs.BlockTermState;
 import org.apache.lucene.codecs.CodecUtil;
 import org.apache.lucene.codecs.PostingsReaderBase;
-import org.apache.lucene.index.DocsAndPositionsEnum;
 import org.apache.lucene.index.DocsEnum;
-import org.apache.lucene.index.FieldInfo.IndexOptions;
 import org.apache.lucene.index.FieldInfo;
+import org.apache.lucene.index.FieldInfo.IndexOptions;
 import org.apache.lucene.index.FieldInfos;
 import org.apache.lucene.index.IndexFileNames;
 import org.apache.lucene.index.SegmentInfo;
@@ -41,6 +37,9 @@
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.IOUtils;
 
+import java.io.IOException;
+import java.util.Arrays;
+
 /** 
  * Concrete class that reads the 4.0 frq/prox
  * postings format. 
@@ -228,8 +227,8 @@
   }
 
   @Override
-  public DocsAndPositionsEnum docsAndPositions(FieldInfo fieldInfo, BlockTermState termState, Bits liveDocs,
-                                               DocsAndPositionsEnum reuse, int flags)
+  public DocsEnum docsAndPositions(FieldInfo fieldInfo, BlockTermState termState, Bits liveDocs,
+                                               DocsEnum reuse, int flags)
     throws IOException {
 
     boolean hasOffsets = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0;
@@ -663,7 +662,7 @@
   // TODO specialize DocsAndPosEnum too
   
   // Decodes docs & positions. payloads nor offsets are present.
-  private final class SegmentDocsAndPositionsEnum extends DocsAndPositionsEnum {
+  private final class SegmentDocsAndPositionsEnum extends DocsEnum {
     final IndexInput startFreqIn;
     private final IndexInput freqIn;
     private final IndexInput proxIn;
@@ -818,6 +817,9 @@
     @Override
     public int nextPosition() throws IOException {
 
+      if (posPendingCount == 0)
+        return NO_MORE_POSITIONS;
+
       if (lazyProxPointer != -1) {
         proxIn.seek(lazyProxPointer);
         lazyProxPointer = -1;
@@ -866,7 +868,7 @@
   }
   
   // Decodes docs & positions & (payloads and/or offsets)
-  private class SegmentFullPositionsEnum extends DocsAndPositionsEnum {
+  private class SegmentFullPositionsEnum extends DocsEnum {
     final IndexInput startFreqIn;
     private final IndexInput freqIn;
     private final IndexInput proxIn;
@@ -1041,6 +1043,9 @@
     @Override
     public int nextPosition() throws IOException {
 
+      if (posPendingCount == 0)
+        return NO_MORE_POSITIONS;
+
       if (lazyProxPointer != -1) {
         proxIn.seek(lazyProxPointer);
         lazyProxPointer = -1;
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene40/Lucene40TermVectorsReader.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene40/Lucene40TermVectorsReader.java
index 8de01a9..b636206 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene40/Lucene40TermVectorsReader.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene40/Lucene40TermVectorsReader.java
@@ -24,10 +24,8 @@
 import java.util.Iterator;
 import java.util.Map;
 import java.util.NoSuchElementException;
-
 import org.apache.lucene.codecs.CodecUtil;
 import org.apache.lucene.codecs.TermVectorsReader;
-import org.apache.lucene.index.DocsAndPositionsEnum;
 import org.apache.lucene.index.DocsEnum;
 import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.FieldInfos;
@@ -539,7 +537,7 @@
     }
 
     @Override
-    public DocsEnum docs(Bits liveDocs, DocsEnum reuse, int flags /* ignored */) throws IOException {
+    public DocsEnum docs(Bits liveDocs, DocsEnum reuse, int flags) throws IOException {
       TVDocsEnum docsEnum;
       if (reuse != null && reuse instanceof TVDocsEnum) {
         docsEnum = (TVDocsEnum) reuse;
@@ -551,7 +549,7 @@
     }
 
     @Override
-    public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) throws IOException {
+    public DocsEnum docsAndPositions(Bits liveDocs, DocsEnum reuse, int flags) throws IOException {
 
       if (!storePositions && !storeOffsets) {
         return null;
@@ -614,7 +612,7 @@
     }
   }
 
-  private static class TVDocsAndPositionsEnum extends DocsAndPositionsEnum {
+  private static class TVDocsAndPositionsEnum extends DocsEnum {
     private boolean didNext;
     private int doc = -1;
     private int nextPos;
@@ -687,8 +685,10 @@
 
     @Override
     public int nextPosition() {
-      assert (positions != null && nextPos < positions.length) ||
-        startOffsets != null && nextPos < startOffsets.length;
+      //assert (positions != null && nextPos < positions.length) ||
+      //  startOffsets != null && nextPos < startOffsets.length;
+      if (positions != null && nextPos >= positions.length)
+        return NO_MORE_POSITIONS;
 
       if (positions != null) {
         return positions[nextPos++];
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene41/Lucene41PostingsReader.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene41/Lucene41PostingsReader.java
index 9ae2265..9367fae 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene41/Lucene41PostingsReader.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene41/Lucene41PostingsReader.java
@@ -17,6 +17,7 @@
  * limitations under the License.
  */
 
+
 import static org.apache.lucene.codecs.lucene41.Lucene41PostingsFormat.BLOCK_SIZE;
 import static org.apache.lucene.codecs.lucene41.ForUtil.MAX_DATA_SIZE;
 import static org.apache.lucene.codecs.lucene41.ForUtil.MAX_ENCODED_SIZE;
@@ -28,7 +29,6 @@
 import org.apache.lucene.codecs.BlockTermState;
 import org.apache.lucene.codecs.CodecUtil;
 import org.apache.lucene.codecs.PostingsReaderBase;
-import org.apache.lucene.index.DocsAndPositionsEnum;
 import org.apache.lucene.index.DocsEnum;
 import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.FieldInfo.IndexOptions;
@@ -218,6 +218,7 @@
     
   @Override
   public DocsEnum docs(FieldInfo fieldInfo, BlockTermState termState, Bits liveDocs, DocsEnum reuse, int flags) throws IOException {
+    /*
     BlockDocsEnum docsEnum;
     if (reuse instanceof BlockDocsEnum) {
       docsEnum = (BlockDocsEnum) reuse;
@@ -228,20 +229,22 @@
       docsEnum = new BlockDocsEnum(fieldInfo);
     }
     return docsEnum.reset(liveDocs, (IntBlockTermState) termState, flags);
+    */
+    return docsAndPositions(fieldInfo, termState, liveDocs, reuse, flags);
   }
 
   // TODO: specialize to liveDocs vs not
   
   @Override
-  public DocsAndPositionsEnum docsAndPositions(FieldInfo fieldInfo, BlockTermState termState, Bits liveDocs,
-                                               DocsAndPositionsEnum reuse, int flags)
+  public DocsEnum docsAndPositions(FieldInfo fieldInfo, BlockTermState termState, Bits liveDocs,
+                                               DocsEnum reuse, int flags)
     throws IOException {
 
     boolean indexHasOffsets = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0;
     boolean indexHasPayloads = fieldInfo.hasPayloads();
 
-    if ((!indexHasOffsets || (flags & DocsAndPositionsEnum.FLAG_OFFSETS) == 0) &&
-        (!indexHasPayloads || (flags & DocsAndPositionsEnum.FLAG_PAYLOADS) == 0)) {
+    if ((!indexHasOffsets || (flags & DocsEnum.FLAG_OFFSETS) == 0) &&
+        (!indexHasPayloads || (flags & DocsEnum.FLAG_PAYLOADS) == 0)) {
       BlockDocsAndPositionsEnum docsAndPositionsEnum;
       if (reuse instanceof BlockDocsAndPositionsEnum) {
         docsAndPositionsEnum = (BlockDocsAndPositionsEnum) reuse;
@@ -544,7 +547,7 @@
   }
 
 
-  final class BlockDocsAndPositionsEnum extends DocsAndPositionsEnum {
+  final class BlockDocsAndPositionsEnum extends DocsEnum {
     
     private final byte[] encoded;
 
@@ -622,7 +625,7 @@
         indexHasPayloads == fieldInfo.hasPayloads();
     }
     
-    public DocsAndPositionsEnum reset(Bits liveDocs, IntBlockTermState termState) throws IOException {
+    public DocsEnum reset(Bits liveDocs, IntBlockTermState termState) throws IOException {
       this.liveDocs = liveDocs;
       // if (DEBUG) {
       //   System.out.println("  FPR.reset: termState=" + termState);
@@ -910,6 +913,9 @@
       // if (DEBUG) {
       //   System.out.println("    FPR.nextPosition posPendingCount=" + posPendingCount + " posBufferUpto=" + posBufferUpto);
       // }
+      if (posPendingCount == 0)
+        return NO_MORE_POSITIONS;
+
       if (posPendingFP != -1) {
         // if (DEBUG) {
         //   System.out.println("      seek to pendingFP=" + posPendingFP);
@@ -939,6 +945,16 @@
     }
 
     @Override
+    public int startPosition() {
+      return position;
+    }
+
+    @Override
+    public int endPosition() {
+      return position;
+    }
+
+    @Override
     public int startOffset() {
       return -1;
     }
@@ -960,7 +976,7 @@
   }
 
   // Also handles payloads + offsets
-  final class EverythingEnum extends DocsAndPositionsEnum {
+  final class EverythingEnum extends DocsEnum {
     
     private final byte[] encoded;
 
@@ -1109,8 +1125,8 @@
         lastPosBlockFP = posTermStartFP + termState.lastPosBlockOffset;
       }
 
-      this.needsOffsets = (flags & DocsAndPositionsEnum.FLAG_OFFSETS) != 0;
-      this.needsPayloads = (flags & DocsAndPositionsEnum.FLAG_PAYLOADS) != 0;
+      this.needsOffsets = (flags & DocsEnum.FLAG_OFFSETS) != 0;
+      this.needsPayloads = (flags & DocsEnum.FLAG_PAYLOADS) != 0;
 
       doc = -1;
       accum = 0;
@@ -1462,6 +1478,9 @@
       // if (DEBUG) {
       //   System.out.println("    FPR.nextPosition posPendingCount=" + posPendingCount + " posBufferUpto=" + posBufferUpto + " payloadByteUpto=" + payloadByteUpto)// ;
       // }
+      if (posPendingCount == 0)
+        return NO_MORE_POSITIONS;
+
       if (posPendingFP != -1) {
         // if (DEBUG) {
         //   System.out.println("      seek pos to pendingFP=" + posPendingFP);
@@ -1515,6 +1534,16 @@
     }
 
     @Override
+    public int startPosition() {
+      return position;
+    }
+
+    @Override
+    public int endPosition() {
+      return position;
+    }
+
+    @Override
     public int startOffset() {
       return startOffset;
     }
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene42/Lucene42DocValuesProducer.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene42/Lucene42DocValuesProducer.java
index c7117a1..a78d063 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene42/Lucene42DocValuesProducer.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene42/Lucene42DocValuesProducer.java
@@ -25,7 +25,6 @@
 import org.apache.lucene.codecs.DocValuesProducer;
 import org.apache.lucene.index.BinaryDocValues;
 import org.apache.lucene.index.CorruptIndexException;
-import org.apache.lucene.index.DocsAndPositionsEnum;
 import org.apache.lucene.index.DocsEnum;
 import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.FieldInfos;
@@ -572,7 +571,7 @@
     }
 
     @Override
-    public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) throws IOException {
+    public DocsEnum docsAndPositions(Bits liveDocs, DocsEnum reuse, int flags) throws IOException {
       throw new UnsupportedOperationException();
     }
   }
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene45/Lucene45DocValuesProducer.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene45/Lucene45DocValuesProducer.java
index 3e09281..9537f88 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene45/Lucene45DocValuesProducer.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene45/Lucene45DocValuesProducer.java
@@ -33,7 +33,6 @@
 import org.apache.lucene.codecs.DocValuesProducer;
 import org.apache.lucene.index.BinaryDocValues;
 import org.apache.lucene.index.CorruptIndexException;
-import org.apache.lucene.index.DocsAndPositionsEnum;
 import org.apache.lucene.index.DocsEnum;
 import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.FieldInfos;
@@ -827,7 +826,7 @@
         }
 
         @Override
-        public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) throws IOException {
+        public DocsEnum docsAndPositions(Bits liveDocs, DocsEnum reuse, int flags) throws IOException {
           throw new UnsupportedOperationException();
         }
       };
diff --git a/lucene/core/src/java/org/apache/lucene/index/AtomicReader.java b/lucene/core/src/java/org/apache/lucene/index/AtomicReader.java
index a3e28e8..3ea850e 100644
--- a/lucene/core/src/java/org/apache/lucene/index/AtomicReader.java
+++ b/lucene/core/src/java/org/apache/lucene/index/AtomicReader.java
@@ -17,11 +17,10 @@
  * limitations under the License.
  */
 
-import java.io.IOException;
-
-import org.apache.lucene.search.SearcherManager; // javadocs
 import org.apache.lucene.util.Bits;
 
+import java.io.IOException;
+
 /** {@code AtomicReader} is an abstract class, providing an interface for accessing an
  index.  Search of an index is done entirely through this abstract interface,
  so that any subclass which implements it is searchable. IndexReaders implemented
@@ -164,11 +163,11 @@
     return null;
   }
 
-  /** Returns {@link DocsAndPositionsEnum} for the specified
+  /** Returns {@link DocsEnum} for the specified
    *  term.  This will return null if the
    *  field or term does not exist or positions weren't indexed. 
-   *  @see TermsEnum#docsAndPositions(Bits, DocsAndPositionsEnum) */
-  public final DocsAndPositionsEnum termPositionsEnum(Term term) throws IOException {
+   *  @see TermsEnum#docsAndPositions(Bits, DocsEnum) */
+  public final DocsEnum termPositionsEnum(Term term) throws IOException {
     assert term.field() != null;
     assert term.bytes() != null;
     final Fields fields = fields();
diff --git a/lucene/core/src/java/org/apache/lucene/index/CheckIndex.java b/lucene/core/src/java/org/apache/lucene/index/CheckIndex.java
index c90c723..1fca757 100644
--- a/lucene/core/src/java/org/apache/lucene/index/CheckIndex.java
+++ b/lucene/core/src/java/org/apache/lucene/index/CheckIndex.java
@@ -713,7 +713,7 @@
     
     DocsEnum docs = null;
     DocsEnum docsAndFreqs = null;
-    DocsAndPositionsEnum postings = null;
+    DocsEnum postings = null;
     
     String lastField = null;
     for (String field : fields) {
@@ -1534,11 +1534,11 @@
       }
 
       DocsEnum docs = null;
-      DocsAndPositionsEnum postings = null;
+      DocsEnum postings = null;
 
       // Only used if crossCheckTermVectors is true:
       DocsEnum postingsDocs = null;
-      DocsAndPositionsEnum postingsPostings = null;
+      DocsEnum postingsPostings = null;
 
       final Bits liveDocs = reader.getLiveDocs();
 
diff --git a/lucene/core/src/java/org/apache/lucene/index/DocTermOrds.java b/lucene/core/src/java/org/apache/lucene/index/DocTermOrds.java
index 42f1b21..5fbc2a3 100644
--- a/lucene/core/src/java/org/apache/lucene/index/DocTermOrds.java
+++ b/lucene/core/src/java/org/apache/lucene/index/DocTermOrds.java
@@ -616,7 +616,7 @@
     }
 
     @Override    
-    public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) throws IOException {
+    public DocsEnum docsAndPositions(Bits liveDocs, DocsEnum reuse, int flags) throws IOException {
       return termsEnum.docsAndPositions(liveDocs, reuse, flags);
     }
 
diff --git a/lucene/core/src/java/org/apache/lucene/index/DocsAndPositionsEnum.java b/lucene/core/src/java/org/apache/lucene/index/DocsAndPositionsEnum.java
deleted file mode 100644
index 60ac2bb..0000000
--- a/lucene/core/src/java/org/apache/lucene/index/DocsAndPositionsEnum.java
+++ /dev/null
@@ -1,62 +0,0 @@
-package org.apache.lucene.index;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.io.IOException;
-
-import org.apache.lucene.util.Bits; // javadocs
-import org.apache.lucene.util.BytesRef;
-
-/** Also iterates through positions. */
-public abstract class DocsAndPositionsEnum extends DocsEnum {
-  
-  /** Flag to pass to {@link TermsEnum#docsAndPositions(Bits,DocsAndPositionsEnum,int)}
-   *  if you require offsets in the returned enum. */
-  public static final int FLAG_OFFSETS = 0x1;
-
-  /** Flag to pass to  {@link TermsEnum#docsAndPositions(Bits,DocsAndPositionsEnum,int)}
-   *  if you require payloads in the returned enum. */
-  public static final int FLAG_PAYLOADS = 0x2;
-
-  /** Sole constructor. (For invocation by subclass 
-   * constructors, typically implicit.) */
-  protected DocsAndPositionsEnum() {
-  }
-
-  /** Returns the next position.  You should only call this
-   *  up to {@link DocsEnum#freq()} times else
-   *  the behavior is not defined.  If positions were not
-   *  indexed this will return -1; this only happens if
-   *  offsets were indexed and you passed needsOffset=true
-   *  when pulling the enum.  */
-  public abstract int nextPosition() throws IOException;
-
-  /** Returns start offset for the current position, or -1
-   *  if offsets were not indexed. */
-  public abstract int startOffset() throws IOException;
-
-  /** Returns end offset for the current position, or -1 if
-   *  offsets were not indexed. */
-  public abstract int endOffset() throws IOException;
-
-  /** Returns the payload at this position, or null if no
-   *  payload was indexed. You should not modify anything 
-   *  (neither members of the returned BytesRef nor bytes 
-   *  in the byte[]). */
-  public abstract BytesRef getPayload() throws IOException;
-}
diff --git a/lucene/core/src/java/org/apache/lucene/index/DocsEnum.java b/lucene/core/src/java/org/apache/lucene/index/DocsEnum.java
index fa4cf54..30c1ef2 100644
--- a/lucene/core/src/java/org/apache/lucene/index/DocsEnum.java
+++ b/lucene/core/src/java/org/apache/lucene/index/DocsEnum.java
@@ -17,12 +17,13 @@
  * limitations under the License.
  */
 
-import java.io.IOException;
-
 import org.apache.lucene.index.FieldInfo.IndexOptions;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.util.AttributeSource;
-import org.apache.lucene.util.Bits; // javadocs
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
+
+import java.io.IOException;
 
 /** Iterates through the documents and term freqs.
  *  NOTE: you must first call {@link #nextDoc} before using
@@ -31,15 +32,23 @@
   
   /**
    * Flag to pass to {@link TermsEnum#docs(Bits,DocsEnum,int)} if you don't
-   * require term frequencies in the returned enum. When passed to
-   * {@link TermsEnum#docsAndPositions(Bits,DocsAndPositionsEnum,int)} means
-   * that no offsets and payloads will be returned.
+   * require term frequencies in the returned enum.
    */
   public static final int FLAG_NONE = 0x0;
 
   /** Flag to pass to {@link TermsEnum#docs(Bits,DocsEnum,int)}
    *  if you require term frequencies in the returned enum. */
   public static final int FLAG_FREQS = 0x1;
+  
+  /** Flag to pass to {@link TermsEnum#docs(Bits,DocsEnum,int)}
+   *  if you require offsets in the returned enum. */
+  public static final int FLAG_OFFSETS = 0x1;
+
+  /** Flag to pass to  {@link TermsEnum#docs(Bits,DocsEnum,int)}
+   *  if you require payloads in the returned enum. */
+  public static final int FLAG_PAYLOADS = 0x2;
+
+  public static final int NO_MORE_POSITIONS = Integer.MAX_VALUE;
 
   private AttributeSource atts = null;
 
@@ -65,4 +74,42 @@
     if (atts == null) atts = new AttributeSource();
     return atts;
   }
+
+  /** Returns the next position.  You should only call this
+   *  up to {@link DocsEnum#freq()} times else
+   *  the behavior is not defined.  If positions were not
+   *  indexed this will return -1; this only happens if
+   *  offsets were indexed and you passed needsOffset=true
+   *  when pulling the enum.  */
+  public int nextPosition() throws IOException {
+     return -1;
+  }
+
+  public int startPosition() throws IOException {
+    return -1;
+  }
+
+  public int endPosition() throws IOException {
+    return -1;
+  }
+
+  /** Returns start offset for the current position, or -1
+   *  if offsets were not indexed. */
+  public int startOffset() throws IOException {
+    return -1;
+  }
+
+  /** Returns end offset for the current position, or -1 if
+   *  offsets were not indexed. */
+  public int endOffset() throws IOException {
+    return -1;
+  }
+
+  /** Returns the payload at this position, or null if no
+   *  payload was indexed. You should not modify anything 
+   *  (neither members of the returned BytesRef nor bytes 
+   *  in the byte[]). */
+  public BytesRef getPayload() throws IOException {
+    return null;
+  }
 }
diff --git a/lucene/core/src/java/org/apache/lucene/index/FilterAtomicReader.java b/lucene/core/src/java/org/apache/lucene/index/FilterAtomicReader.java
index 8a618ec..4931046 100644
--- a/lucene/core/src/java/org/apache/lucene/index/FilterAtomicReader.java
+++ b/lucene/core/src/java/org/apache/lucene/index/FilterAtomicReader.java
@@ -196,7 +196,7 @@
     }
 
     @Override
-    public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) throws IOException {
+    public DocsEnum docsAndPositions(Bits liveDocs, DocsEnum reuse, int flags) throws IOException {
       return in.docsAndPositions(liveDocs, reuse, flags);
     }
   }
@@ -240,55 +240,21 @@
     }
 
     @Override
-    public long cost() {
-      return in.cost();
-    }
-  }
-
-  /** Base class for filtering {@link DocsAndPositionsEnum} implementations. */
-  public static class FilterDocsAndPositionsEnum extends DocsAndPositionsEnum {
-    /** The underlying DocsAndPositionsEnum instance. */
-    protected final DocsAndPositionsEnum in;
-
-    /**
-     * Create a new FilterDocsAndPositionsEnum
-     * @param in the underlying DocsAndPositionsEnum instance.
-     */
-    public FilterDocsAndPositionsEnum(DocsAndPositionsEnum in) {
-      this.in = in;
-    }
-
-    @Override
-    public AttributeSource attributes() {
-      return in.attributes();
-    }
-
-    @Override
-    public int docID() {
-      return in.docID();
-    }
-
-    @Override
-    public int freq() throws IOException {
-      return in.freq();
-    }
-
-    @Override
-    public int nextDoc() throws IOException {
-      return in.nextDoc();
-    }
-
-    @Override
-    public int advance(int target) throws IOException {
-      return in.advance(target);
-    }
-
-    @Override
     public int nextPosition() throws IOException {
       return in.nextPosition();
     }
 
     @Override
+    public int startPosition() throws IOException {
+      return in.startPosition();
+    }
+
+    @Override
+    public int endPosition() throws IOException {
+      return in.endPosition();
+    }
+
+    @Override
     public int startOffset() throws IOException {
       return in.startOffset();
     }
diff --git a/lucene/core/src/java/org/apache/lucene/index/FilteredTermsEnum.java b/lucene/core/src/java/org/apache/lucene/index/FilteredTermsEnum.java
index 3213a22..5bbf96a 100644
--- a/lucene/core/src/java/org/apache/lucene/index/FilteredTermsEnum.java
+++ b/lucene/core/src/java/org/apache/lucene/index/FilteredTermsEnum.java
@@ -181,7 +181,7 @@
   }
     
   @Override
-  public DocsAndPositionsEnum docsAndPositions(Bits bits, DocsAndPositionsEnum reuse, int flags) throws IOException {
+  public DocsEnum docsAndPositions(Bits bits, DocsEnum reuse, int flags) throws IOException {
     return tenum.docsAndPositions(bits, reuse, flags);
   }
   
diff --git a/lucene/core/src/java/org/apache/lucene/index/FreqProxFields.java b/lucene/core/src/java/org/apache/lucene/index/FreqProxFields.java
index 605dbfd..2d91979 100644
--- a/lucene/core/src/java/org/apache/lucene/index/FreqProxFields.java
+++ b/lucene/core/src/java/org/apache/lucene/index/FreqProxFields.java
@@ -17,18 +17,18 @@
  * limitations under the License.
  */
 
+import org.apache.lucene.index.FieldInfo.IndexOptions;
+import org.apache.lucene.index.FreqProxTermsWriterPerField.FreqProxPostingsArray;
+import org.apache.lucene.util.AttributeSource;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
+
 import java.io.IOException;
 import java.util.Iterator;
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
 
-import org.apache.lucene.index.FieldInfo.IndexOptions;
-import org.apache.lucene.index.FreqProxTermsWriterPerField.FreqProxPostingsArray;
-import org.apache.lucene.util.AttributeSource; // javadocs
-import org.apache.lucene.util.Bits;
-import org.apache.lucene.util.BytesRef;
-
 /** Implements limited (iterators only, no stats) {@link
  *  Fields} interface over the in-RAM buffered
  *  fields/terms/postings, to flush postings through the
@@ -254,34 +254,8 @@
     }
 
     @Override
-    public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) {
-      if (liveDocs != null) {
-        throw new IllegalArgumentException("liveDocs must be null");
-      }
-      FreqProxDocsAndPositionsEnum posEnum;
-
-      if (!terms.hasProx) {
-        // Caller wants positions but we didn't index them;
-        // don't lie:
-        throw new IllegalArgumentException("did not index positions");
-      }
-
-      if (!terms.hasOffsets && (flags & DocsAndPositionsEnum.FLAG_OFFSETS) != 0) {
-        // Caller wants offsets but we didn't index them;
-        // don't lie:
-        throw new IllegalArgumentException("did not index offsets");
-      }
-
-      if (reuse instanceof FreqProxDocsAndPositionsEnum) {
-        posEnum = (FreqProxDocsAndPositionsEnum) reuse;
-        if (posEnum.postingsArray != postingsArray) {
-          posEnum = new FreqProxDocsAndPositionsEnum(terms, postingsArray);
-        }
-      } else {
-        posEnum = new FreqProxDocsAndPositionsEnum(terms, postingsArray);
-      }
-      posEnum.reset(sortedTermIDs[ord]);
-      return posEnum;
+    public DocsEnum docsAndPositions(Bits liveDocs, DocsEnum reuse, int flags) {
+      return docs(liveDocs, reuse, flags);
     }
 
     /**
@@ -387,142 +361,4 @@
     }
   }
 
-  private static class FreqProxDocsAndPositionsEnum extends DocsAndPositionsEnum {
-
-    final FreqProxTermsWriterPerField terms;
-    final FreqProxPostingsArray postingsArray;
-    final ByteSliceReader reader = new ByteSliceReader();
-    final ByteSliceReader posReader = new ByteSliceReader();
-    final boolean readOffsets;
-    int docID;
-    int freq;
-    int pos;
-    int startOffset;
-    int endOffset;
-    int posLeft;
-    int termID;
-    boolean ended;
-    boolean hasPayload;
-    BytesRef payload = new BytesRef();
-
-    public FreqProxDocsAndPositionsEnum(FreqProxTermsWriterPerField terms, FreqProxPostingsArray postingsArray) {
-      this.terms = terms;
-      this.postingsArray = postingsArray;
-      this.readOffsets = terms.hasOffsets;
-      assert terms.hasProx;
-      assert terms.hasFreq;
-    }
-
-    public void reset(int termID) {
-      this.termID = termID;
-      terms.termsHashPerField.initReader(reader, termID, 0);
-      terms.termsHashPerField.initReader(posReader, termID, 1);
-      ended = false;
-      docID = 0;
-      posLeft = 0;
-    }
-
-    @Override
-    public int docID() {
-      return docID;
-    }
-
-    @Override
-    public int freq() {
-      return freq;
-    }
-
-    @Override
-    public int nextDoc() throws IOException {
-      while (posLeft != 0) {
-        nextPosition();
-      }
-
-      if (reader.eof()) {
-        if (ended) {
-          return NO_MORE_DOCS;
-        } else {
-          ended = true;
-          docID = postingsArray.lastDocIDs[termID];
-          freq = postingsArray.termFreqs[termID];
-        }
-      } else {
-        int code = reader.readVInt();
-        docID += code >>> 1;
-        if ((code & 1) != 0) {
-          freq = 1;
-        } else {
-          freq = reader.readVInt();
-        }
-
-        assert docID != postingsArray.lastDocIDs[termID];
-      }
-
-      posLeft = freq;
-      pos = 0;
-      startOffset = 0;
-      return docID;
-    }
-
-    @Override
-    public int advance(int target) {
-      throw new UnsupportedOperationException();
-    }
-
-    @Override
-    public long cost() {
-      throw new UnsupportedOperationException();
-    }
-
-    @Override
-    public int nextPosition() throws IOException {
-      assert posLeft > 0;
-      posLeft--;
-      int code = posReader.readVInt();
-      pos += code >>> 1;
-      if ((code & 1) != 0) {
-        hasPayload = true;
-        // has a payload
-        payload.length = posReader.readVInt();
-        if (payload.bytes.length < payload.length) {
-          payload.grow(payload.length);
-        }
-        posReader.readBytes(payload.bytes, 0, payload.length);
-      } else {
-        hasPayload = false;
-      }
-
-      if (readOffsets) {
-        startOffset += posReader.readVInt();
-        endOffset = startOffset + posReader.readVInt();
-      }
-
-      return pos;
-    }
-
-    @Override
-    public int startOffset() {
-      if (!readOffsets) {
-        throw new IllegalStateException("offsets were not indexed");
-      }
-      return startOffset;
-    }
-
-    @Override
-    public int endOffset() {
-      if (!readOffsets) {
-        throw new IllegalStateException("offsets were not indexed");
-      }
-      return endOffset;
-    }
-
-    @Override
-    public BytesRef getPayload() {
-      if (hasPayload) {
-        return payload;
-      } else {
-        return null;
-      }
-    }
-  }
 }
diff --git a/lucene/core/src/java/org/apache/lucene/index/MappedMultiFields.java b/lucene/core/src/java/org/apache/lucene/index/MappedMultiFields.java
index 7bee81e..953ade4 100644
--- a/lucene/core/src/java/org/apache/lucene/index/MappedMultiFields.java
+++ b/lucene/core/src/java/org/apache/lucene/index/MappedMultiFields.java
@@ -17,10 +17,10 @@
  * limitations under the License.
  */
 
-import java.io.IOException;
-
 import org.apache.lucene.util.Bits;
 
+import java.io.IOException;
+
 import static org.apache.lucene.index.FilterAtomicReader.FilterFields;
 import static org.apache.lucene.index.FilterAtomicReader.FilterTerms;
 import static org.apache.lucene.index.FilterAtomicReader.FilterTermsEnum;
@@ -116,21 +116,5 @@
       return mappingDocsEnum;
     }
 
-    @Override
-    public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) throws IOException {
-      if (liveDocs != null) {
-        throw new IllegalArgumentException("liveDocs must be null");
-      }
-      MappingMultiDocsAndPositionsEnum mappingDocsAndPositionsEnum;
-      if (reuse instanceof MappingMultiDocsAndPositionsEnum) {
-        mappingDocsAndPositionsEnum = (MappingMultiDocsAndPositionsEnum) reuse;
-      } else {
-        mappingDocsAndPositionsEnum = new MappingMultiDocsAndPositionsEnum(mergeState);
-      }
-      
-      MultiDocsAndPositionsEnum docsAndPositionsEnum = (MultiDocsAndPositionsEnum) in.docsAndPositions(liveDocs, mappingDocsAndPositionsEnum.multiDocsAndPositionsEnum, flags);
-      mappingDocsAndPositionsEnum.reset(docsAndPositionsEnum);
-      return mappingDocsAndPositionsEnum;
-    }
   }
 }
diff --git a/lucene/core/src/java/org/apache/lucene/index/MultiDocsAndPositionsEnum.java b/lucene/core/src/java/org/apache/lucene/index/MultiDocsAndPositionsEnum.java
deleted file mode 100644
index 8bd2b84..0000000
--- a/lucene/core/src/java/org/apache/lucene/index/MultiDocsAndPositionsEnum.java
+++ /dev/null
@@ -1,193 +0,0 @@
-package org.apache.lucene.index;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import org.apache.lucene.util.BytesRef;
-
-import java.io.IOException;
-import java.util.Arrays;
-
-/**
- * Exposes flex API, merged from flex API of sub-segments.
- *
- * @lucene.experimental
- */
-
-public final class MultiDocsAndPositionsEnum extends DocsAndPositionsEnum {
-  private final MultiTermsEnum parent;
-  final DocsAndPositionsEnum[] subDocsAndPositionsEnum;
-  private EnumWithSlice[] subs;
-  int numSubs;
-  int upto;
-  DocsAndPositionsEnum current;
-  int currentBase;
-  int doc = -1;
-
-  /** Sole constructor. */
-  public MultiDocsAndPositionsEnum(MultiTermsEnum parent, int subReaderCount) {
-    this.parent = parent;
-    subDocsAndPositionsEnum = new DocsAndPositionsEnum[subReaderCount];
-  }
-
-  /** Returns {@code true} if this instance can be reused by
-   *  the provided {@link MultiTermsEnum}. */
-  public boolean canReuse(MultiTermsEnum parent) {
-    return this.parent == parent;
-  }
-
-  /** Rre-use and reset this instance on the provided slices. */
-  public MultiDocsAndPositionsEnum reset(final EnumWithSlice[] subs, final int numSubs) {
-    this.numSubs = numSubs;
-    this.subs = new EnumWithSlice[subs.length];
-    for(int i=0;i<subs.length;i++) {
-      this.subs[i] = new EnumWithSlice();
-      this.subs[i].docsAndPositionsEnum = subs[i].docsAndPositionsEnum;
-      this.subs[i].slice = subs[i].slice;
-    }
-    upto = -1;
-    doc = -1;
-    current = null;
-    return this;
-  }
-
-  /** How many sub-readers we are merging.
-   *  @see #getSubs */
-  public int getNumSubs() {
-    return numSubs;
-  }
-
-  /** Returns sub-readers we are merging. */
-  public EnumWithSlice[] getSubs() {
-    return subs;
-  }
-
-  @Override
-  public int freq() throws IOException {
-    assert current != null;
-    return current.freq();
-  }
-
-  @Override
-  public int docID() {
-    return doc;
-  }
-
-  @Override
-  public int advance(int target) throws IOException {
-    assert target > doc;
-    while(true) {
-      if (current != null) {
-        final int doc;
-        if (target < currentBase) {
-          // target was in the previous slice but there was no matching doc after it
-          doc = current.nextDoc();
-        } else {
-          doc = current.advance(target-currentBase);
-        }
-        if (doc == NO_MORE_DOCS) {
-          current = null;
-        } else {
-          return this.doc = doc + currentBase;
-        }
-      } else if (upto == numSubs-1) {
-        return this.doc = NO_MORE_DOCS;
-      } else {
-        upto++;
-        current = subs[upto].docsAndPositionsEnum;
-        currentBase = subs[upto].slice.start;
-      }
-    }
-  }
-
-  @Override
-  public int nextDoc() throws IOException {
-    while(true) {
-      if (current == null) {
-        if (upto == numSubs-1) {
-          return this.doc = NO_MORE_DOCS;
-        } else {
-          upto++;
-          current = subs[upto].docsAndPositionsEnum;
-          currentBase = subs[upto].slice.start;
-        }
-      }
-
-      final int doc = current.nextDoc();
-      if (doc != NO_MORE_DOCS) {
-        return this.doc = currentBase + doc;
-      } else {
-        current = null;
-      }
-    }
-  }
-
-  @Override
-  public int nextPosition() throws IOException {
-    return current.nextPosition();
-  }
-
-  @Override
-  public int startOffset() throws IOException {
-    return current.startOffset();
-  }
-
-  @Override
-  public int endOffset() throws IOException {
-    return current.endOffset();
-  }
-
-  @Override
-  public BytesRef getPayload() throws IOException {
-    return current.getPayload();
-  }
-
-  // TODO: implement bulk read more efficiently than super
-  /** Holds a {@link DocsAndPositionsEnum} along with the
-   *  corresponding {@link ReaderSlice}. */
-  public final static class EnumWithSlice {
-    EnumWithSlice() {
-    }
-
-    /** {@link DocsAndPositionsEnum} for this sub-reader. */
-    public DocsAndPositionsEnum docsAndPositionsEnum;
-
-    /** {@link ReaderSlice} describing how this sub-reader
-     *  fits into the composite reader. */
-    public ReaderSlice slice;
-    
-    @Override
-    public String toString() {
-      return slice.toString()+":"+docsAndPositionsEnum;
-    }
-  }
-  
-  @Override
-  public long cost() {
-    long cost = 0;
-    for (int i = 0; i < numSubs; i++) {
-      cost += subs[i].docsAndPositionsEnum.cost();
-    }
-    return cost;
-  }
-  
-  @Override
-  public String toString() {
-    return "MultiDocsAndPositionsEnum(" + Arrays.toString(getSubs()) + ")";
-  }
-}
-
diff --git a/lucene/core/src/java/org/apache/lucene/index/MultiDocsEnum.java b/lucene/core/src/java/org/apache/lucene/index/MultiDocsEnum.java
index 3d778b0..fdd6dec 100644
--- a/lucene/core/src/java/org/apache/lucene/index/MultiDocsEnum.java
+++ b/lucene/core/src/java/org/apache/lucene/index/MultiDocsEnum.java
@@ -18,6 +18,8 @@
  */
 
 
+import org.apache.lucene.util.BytesRef;
+
 import java.io.IOException;
 import java.util.Arrays;
 
@@ -87,6 +89,26 @@
   public int docID() {
     return doc;
   }
+  
+  @Override
+  public int nextPosition() throws IOException {
+    return current.nextPosition();
+  }
+
+  @Override
+  public int startOffset() throws IOException {
+    return current.startOffset();
+  }
+
+  @Override
+  public int endOffset() throws IOException {
+    return current.endOffset();
+  }
+
+  @Override
+  public BytesRef getPayload() throws IOException {
+    return current.getPayload();
+  }
 
   @Override
   public int advance(int target) throws IOException {
diff --git a/lucene/core/src/java/org/apache/lucene/index/MultiFields.java b/lucene/core/src/java/org/apache/lucene/index/MultiFields.java
index b25d655..c1c92bf 100644
--- a/lucene/core/src/java/org/apache/lucene/index/MultiFields.java
+++ b/lucene/core/src/java/org/apache/lucene/index/MultiFields.java
@@ -158,22 +158,22 @@
     return null;
   }
 
-  /** Returns {@link DocsAndPositionsEnum} for the specified
+  /** Returns {@link DocsEnum} for the specified
    *  field & term.  This will return null if the field or
    *  term does not exist or positions were not indexed. 
    *  @see #getTermPositionsEnum(IndexReader, Bits, String, BytesRef, int) */
-  public static DocsAndPositionsEnum getTermPositionsEnum(IndexReader r, Bits liveDocs, String field, BytesRef term) throws IOException {
-    return getTermPositionsEnum(r, liveDocs, field, term, DocsAndPositionsEnum.FLAG_OFFSETS | DocsAndPositionsEnum.FLAG_PAYLOADS);
+  public static DocsEnum getTermPositionsEnum(IndexReader r, Bits liveDocs, String field, BytesRef term) throws IOException {
+    return getTermPositionsEnum(r, liveDocs, field, term, DocsEnum.FLAG_OFFSETS | DocsEnum.FLAG_PAYLOADS);
   }
 
-  /** Returns {@link DocsAndPositionsEnum} for the specified
+  /** Returns {@link DocsEnum} for the specified
    *  field & term, with control over whether offsets and payloads are
    *  required.  Some codecs may be able to optimize
    *  their implementation when offsets and/or payloads are not
    *  required. This will return null if the field or term does not
    *  exist or positions were not indexed. See {@link
-   *  TermsEnum#docsAndPositions(Bits,DocsAndPositionsEnum,int)}. */
-  public static DocsAndPositionsEnum getTermPositionsEnum(IndexReader r, Bits liveDocs, String field, BytesRef term, int flags) throws IOException {
+   *  TermsEnum#docs(Bits,DocsEnum,int)}. */
+  public static DocsEnum getTermPositionsEnum(IndexReader r, Bits liveDocs, String field, BytesRef term, int flags) throws IOException {
     assert field != null;
     assert term != null;
     final Terms terms = getTerms(r, field);
diff --git a/lucene/core/src/java/org/apache/lucene/index/MultiTermsEnum.java b/lucene/core/src/java/org/apache/lucene/index/MultiTermsEnum.java
index 9e2abdd..5e40e99 100644
--- a/lucene/core/src/java/org/apache/lucene/index/MultiTermsEnum.java
+++ b/lucene/core/src/java/org/apache/lucene/index/MultiTermsEnum.java
@@ -37,7 +37,6 @@
   private final TermsEnumWithSlice[] currentSubs; // current subs that have at least one term for this field
   private final TermsEnumWithSlice[] top;
   private final MultiDocsEnum.EnumWithSlice[] subDocs;
-  private final MultiDocsAndPositionsEnum.EnumWithSlice[] subDocsAndPositions;
 
   private BytesRef lastSeek;
   private boolean lastSeekExact;
@@ -77,13 +76,10 @@
     top = new TermsEnumWithSlice[slices.length];
     subs = new TermsEnumWithSlice[slices.length];
     subDocs = new MultiDocsEnum.EnumWithSlice[slices.length];
-    subDocsAndPositions = new MultiDocsAndPositionsEnum.EnumWithSlice[slices.length];
     for(int i=0;i<slices.length;i++) {
       subs[i] = new TermsEnumWithSlice(i, slices[i]);
       subDocs[i] = new MultiDocsEnum.EnumWithSlice();
       subDocs[i].slice = slices[i];
-      subDocsAndPositions[i] = new MultiDocsAndPositionsEnum.EnumWithSlice();
-      subDocsAndPositions[i].slice = slices[i];
     }
     currentSubs = new TermsEnumWithSlice[slices.length];
   }
@@ -331,6 +327,15 @@
 
   @Override
   public DocsEnum docs(Bits liveDocs, DocsEnum reuse, int flags) throws IOException {
+    return getEnum(liveDocs, reuse, flags, false);
+  }
+  
+  @Override
+  public DocsEnum docsAndPositions(Bits liveDocs, DocsEnum reuse, int flags) throws IOException {
+    return getEnum(liveDocs, reuse, flags, true);
+  }
+  
+  private MultiDocsEnum getEnum(Bits liveDocs, DocsEnum reuse, int flags, boolean positions) throws IOException {
     MultiDocsEnum docsEnum;
     // Can only reuse if incoming enum is also a MultiDocsEnum
     if (reuse != null && reuse instanceof MultiDocsEnum) {
@@ -343,6 +348,7 @@
       docsEnum = new MultiDocsEnum(this, subs.length);
     }
     
+
     final MultiBits multiLiveDocs;
     if (liveDocs instanceof MultiBits) {
       multiLiveDocs = (MultiBits) liveDocs;
@@ -380,15 +386,24 @@
       }
 
       assert entry.index < docsEnum.subDocsEnum.length: entry.index + " vs " + docsEnum.subDocsEnum.length + "; " + subs.length;
-      final DocsEnum subDocsEnum = entry.terms.docs(b, docsEnum.subDocsEnum[entry.index], flags);
+      final DocsEnum subDocsEnum =  positions ? entry.terms.docsAndPositions(b, docsEnum.subDocsEnum[entry.index], flags) : entry.terms.docs(b, docsEnum.subDocsEnum[entry.index], flags);
       if (subDocsEnum != null) {
         docsEnum.subDocsEnum[entry.index] = subDocsEnum;
         subDocs[upto].docsEnum = subDocsEnum;
         subDocs[upto].slice = entry.subSlice;
         upto++;
       } else {
-        // should this be an error?
-        assert false : "One of our subs cannot provide a docsenum";
+        if (positions) {
+          if (entry.terms.docs(b, null, DocsEnum.FLAG_NONE) != null) {
+            // At least one of our subs does not store
+            // offsets or positions -- we can't correctly
+            // produce a MultiDocsAndPositions enum
+            return null;
+          }
+        } else {
+          // should this be an error?
+          assert false : "One of our subs cannot provide a docsenum";
+        }
       }
     }
 
@@ -399,82 +414,6 @@
     }
   }
 
-  @Override
-  public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) throws IOException {
-    MultiDocsAndPositionsEnum docsAndPositionsEnum;
-    // Can only reuse if incoming enum is also a MultiDocsAndPositionsEnum
-    if (reuse != null && reuse instanceof MultiDocsAndPositionsEnum) {
-      docsAndPositionsEnum = (MultiDocsAndPositionsEnum) reuse;
-      // ... and was previously created w/ this MultiTermsEnum:
-      if (!docsAndPositionsEnum.canReuse(this)) {
-        docsAndPositionsEnum = new MultiDocsAndPositionsEnum(this, subs.length);
-      }
-    } else {
-      docsAndPositionsEnum = new MultiDocsAndPositionsEnum(this, subs.length);
-    }
-    
-    final MultiBits multiLiveDocs;
-    if (liveDocs instanceof MultiBits) {
-      multiLiveDocs = (MultiBits) liveDocs;
-    } else {
-      multiLiveDocs = null;
-    }
-
-    int upto = 0;
-
-    for(int i=0;i<numTop;i++) {
-
-      final TermsEnumWithSlice entry = top[i];
-
-      final Bits b;
-
-      if (multiLiveDocs != null) {
-        // Optimize for common case: requested skip docs is a
-        // congruent sub-slice of MultiBits: in this case, we
-        // just pull the liveDocs from the sub reader, rather
-        // than making the inefficient
-        // Slice(Multi(sub-readers)):
-        final MultiBits.SubResult sub = multiLiveDocs.getMatchingSub(top[i].subSlice);
-        if (sub.matches) {
-          b = sub.result;
-        } else {
-          // custom case: requested skip docs is foreign:
-          // must slice it on every access (very
-          // inefficient)
-          b = new BitsSlice(liveDocs, top[i].subSlice);
-        }
-      } else if (liveDocs != null) {
-        b = new BitsSlice(liveDocs, top[i].subSlice);
-      } else {
-        // no deletions
-        b = null;
-      }
-
-      assert entry.index < docsAndPositionsEnum.subDocsAndPositionsEnum.length: entry.index + " vs " + docsAndPositionsEnum.subDocsAndPositionsEnum.length + "; " + subs.length;
-      final DocsAndPositionsEnum subPostings = entry.terms.docsAndPositions(b, docsAndPositionsEnum.subDocsAndPositionsEnum[entry.index], flags);
-
-      if (subPostings != null) {
-        docsAndPositionsEnum.subDocsAndPositionsEnum[entry.index] = subPostings;
-        subDocsAndPositions[upto].docsAndPositionsEnum = subPostings;
-        subDocsAndPositions[upto].slice = entry.subSlice;
-        upto++;
-      } else {
-        if (entry.terms.docs(b, null, DocsEnum.FLAG_NONE) != null) {
-          // At least one of our subs does not store
-          // offsets or positions -- we can't correctly
-          // produce a MultiDocsAndPositions enum
-          return null;
-        }
-      }
-    }
-
-    if (upto == 0) {
-      return null;
-    } else {
-      return docsAndPositionsEnum.reset(subDocsAndPositions, upto);
-    }
-  }
-
   final static class TermsEnumWithSlice {
     private final ReaderSlice subSlice;
     TermsEnum terms;
diff --git a/lucene/core/src/java/org/apache/lucene/index/SortedDocValuesTermsEnum.java b/lucene/core/src/java/org/apache/lucene/index/SortedDocValuesTermsEnum.java
index 0dedfab..0b5c728 100644
--- a/lucene/core/src/java/org/apache/lucene/index/SortedDocValuesTermsEnum.java
+++ b/lucene/core/src/java/org/apache/lucene/index/SortedDocValuesTermsEnum.java
@@ -17,11 +17,11 @@
  * limitations under the License.
  */
 
-import java.io.IOException;
-
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
 
+import java.io.IOException;
+
 /** Implements a {@link TermsEnum} wrapping a provided
  * {@link SortedDocValues}. */
 
@@ -119,7 +119,7 @@
   }
 
   @Override
-  public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) throws IOException {
+  public DocsEnum docsAndPositions(Bits liveDocs, DocsEnum reuse, int flags) throws IOException {
     throw new UnsupportedOperationException();
   }
 
diff --git a/lucene/core/src/java/org/apache/lucene/index/SortedSetDocValuesTermsEnum.java b/lucene/core/src/java/org/apache/lucene/index/SortedSetDocValuesTermsEnum.java
index a48f3eb..3e4e36c 100644
--- a/lucene/core/src/java/org/apache/lucene/index/SortedSetDocValuesTermsEnum.java
+++ b/lucene/core/src/java/org/apache/lucene/index/SortedSetDocValuesTermsEnum.java
@@ -17,11 +17,11 @@
  * limitations under the License.
  */
 
-import java.io.IOException;
-
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
 
+import java.io.IOException;
+
 /** Implements a {@link TermsEnum} wrapping a provided
  * {@link SortedSetDocValues}. */
 
@@ -119,7 +119,7 @@
   }
 
   @Override
-  public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) throws IOException {
+  public DocsEnum docsAndPositions(Bits liveDocs, DocsEnum reuse, int flags) throws IOException {
     throw new UnsupportedOperationException();
   }
 
diff --git a/lucene/core/src/java/org/apache/lucene/index/TermContext.java b/lucene/core/src/java/org/apache/lucene/index/TermContext.java
index ac80a94..262548a 100644
--- a/lucene/core/src/java/org/apache/lucene/index/TermContext.java
+++ b/lucene/core/src/java/org/apache/lucene/index/TermContext.java
@@ -17,11 +17,11 @@
  * limitations under the License.
  */
 
+import org.apache.lucene.util.BytesRef;
+
 import java.io.IOException;
 import java.util.Arrays;
 
-import org.apache.lucene.util.BytesRef;
-
 /**
  * Maintains a {@link IndexReader} {@link TermState} view over
  * {@link IndexReader} instances containing a single term. The
diff --git a/lucene/core/src/java/org/apache/lucene/index/TermsEnum.java b/lucene/core/src/java/org/apache/lucene/index/TermsEnum.java
index 895018b..a9b4436 100644
--- a/lucene/core/src/java/org/apache/lucene/index/TermsEnum.java
+++ b/lucene/core/src/java/org/apache/lucene/index/TermsEnum.java
@@ -17,18 +17,18 @@
  * limitations under the License.
  */
 
-import java.io.IOException;
-
 import org.apache.lucene.util.AttributeSource;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefIterator;
 
+import java.io.IOException;
+
 /** Iterator to seek ({@link #seekCeil(BytesRef)}, {@link
  * #seekExact(BytesRef)}) or step through ({@link
  * #next} terms to obtain frequency information ({@link
  * #docFreq}), {@link DocsEnum} or {@link
- * DocsAndPositionsEnum} for the current term ({@link
+ * DocsEnum} for the current term ({@link
  * #docs}.
  * 
  * <p>Term enumerations are always ordered by
@@ -162,20 +162,20 @@
    * @see #docs(Bits, DocsEnum, int) */
   public abstract DocsEnum docs(Bits liveDocs, DocsEnum reuse, int flags) throws IOException;
 
-  /** Get {@link DocsAndPositionsEnum} for the current term.
+  /** Get {@link DocsEnum} for the current term.
    *  Do not call this when the enum is unpositioned.  This
    *  method will return null if positions were not
    *  indexed.
    *  
    *  @param liveDocs unset bits are documents that should not
    *  be returned
-   *  @param reuse pass a prior DocsAndPositionsEnum for possible reuse
-   *  @see #docsAndPositions(Bits, DocsAndPositionsEnum, int) */
-  public final DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse) throws IOException {
-    return docsAndPositions(liveDocs, reuse, DocsAndPositionsEnum.FLAG_OFFSETS | DocsAndPositionsEnum.FLAG_PAYLOADS);
+   *  @param reuse pass a prior DocsEnum for possible reuse
+   **/
+  public final DocsEnum docsAndPositions(Bits liveDocs, DocsEnum reuse) throws IOException {
+    return docsAndPositions(liveDocs, reuse, DocsEnum.FLAG_OFFSETS | DocsEnum.FLAG_PAYLOADS);
   }
 
-  /** Get {@link DocsAndPositionsEnum} for the current term,
+  /** Get {@link DocsEnum} for the current term,
    *  with control over whether offsets and payloads are
    *  required.  Some codecs may be able to optimize their
    *  implementation when offsets and/or payloads are not required.
@@ -184,11 +184,11 @@
 
    *  @param liveDocs unset bits are documents that should not
    *  be returned
-   *  @param reuse pass a prior DocsAndPositionsEnum for possible reuse
+   *  @param reuse pass a prior DocsEnum for possible reuse
    *  @param flags specifies which optional per-position values you
-   *         require; see {@link DocsAndPositionsEnum#FLAG_OFFSETS} and 
-   *         {@link DocsAndPositionsEnum#FLAG_PAYLOADS}. */
-  public abstract DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) throws IOException;
+   *         require; see {@link DocsEnum#FLAG_OFFSETS} and 
+   *         {@link DocsEnum#FLAG_PAYLOADS}. */
+  public abstract DocsEnum docsAndPositions(Bits liveDocs, DocsEnum reuse, int flags) throws IOException;
 
   /**
    * Expert: Returns the TermsEnums internal state to position the TermsEnum
@@ -250,11 +250,6 @@
     }
       
     @Override
-    public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) {
-      throw new IllegalStateException("this method should never be called");
-    }
-      
-    @Override
     public BytesRef next() {
       return null;
     }
@@ -273,5 +268,11 @@
     public void seekExact(BytesRef term, TermState state) {
       throw new IllegalStateException("this method should never be called");
     }
+
+    @Override
+    public DocsEnum docsAndPositions(Bits liveDocs, DocsEnum reuse, int flags)
+        throws IOException {
+      throw new IllegalStateException("this method should never be called");
+    }
   };
 }
diff --git a/lucene/core/src/java/org/apache/lucene/search/BooleanQuery.java b/lucene/core/src/java/org/apache/lucene/search/BooleanQuery.java
index 6b6117d..fee72dd 100644
--- a/lucene/core/src/java/org/apache/lucene/search/BooleanQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/BooleanQuery.java
@@ -17,12 +17,6 @@
  * limitations under the License.
  */
 
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Set;
-
 import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.Term;
@@ -31,6 +25,12 @@
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.ToStringUtils;
 
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Set;
+
 /** A Query that matches documents matching boolean combinations of other
   * queries, e.g. {@link TermQuery}s, {@link PhraseQuery}s or other
   * BooleanQuerys.
@@ -237,7 +237,7 @@
       for (Iterator<Weight> wIter = weights.iterator(); wIter.hasNext();) {
         Weight w = wIter.next();
         BooleanClause c = cIter.next();
-        if (w.scorer(context, true, true, context.reader().getLiveDocs()) == null) {
+        if (w.scorer(context, true, true, PostingFeatures.DOCS_AND_FREQS, context.reader().getLiveDocs()) == null) {
           if (c.isRequired()) {
             fail = true;
             Explanation r = new Explanation(0.0f, "no match on required clause (" + c.getQuery().toString() + ")");
@@ -300,15 +300,16 @@
 
     @Override
     public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder,
-        boolean topScorer, Bits acceptDocs)
+        boolean topScorer, PostingFeatures flags, Bits acceptDocs)
         throws IOException {
+
       List<Scorer> required = new ArrayList<Scorer>();
       List<Scorer> prohibited = new ArrayList<Scorer>();
       List<Scorer> optional = new ArrayList<Scorer>();
       Iterator<BooleanClause> cIter = clauses.iterator();
       for (Weight w  : weights) {
         BooleanClause c =  cIter.next();
-        Scorer subScorer = w.scorer(context, true, false, acceptDocs);
+        Scorer subScorer = w.scorer(context, true, false, flags, acceptDocs);
         if (subScorer == null) {
           if (c.isRequired()) {
             return null;
@@ -329,10 +330,12 @@
       // detect and we never do so today... (ie, we only
       // return BooleanScorer for topScorer):
 
-      // Check if we can and should return a BooleanScorer
-      // TODO: (LUCENE-4872) in some cases BooleanScorer may be faster for minNrShouldMatch
-      // but the same is even true of pure conjunctions...
-      if (!scoreDocsInOrder && topScorer && required.size() == 0 && minNrShouldMatch <= 1) {
+
+      // Check if we can return a BooleanScorer
+      // nocommit - we need to somehow detect if we need to iterate positions
+      // for now, always return BS2
+      boolean needsPositions = true;
+      if (!needsPositions && !scoreDocsInOrder && flags == PostingFeatures.DOCS_AND_FREQS && topScorer && required.size() == 0) {
         return new BooleanScorer(this, disableCoord, minNrShouldMatch, optional, prohibited, maxCoord);
       }
       
@@ -376,6 +379,15 @@
       // scorer() will return an out-of-order scorer if requested.
       return true;
     }
+
+    @Override
+    public String toString() {
+      StringBuffer sb = new StringBuffer("BooleanWeight[");
+      for (Weight weight : weights) {
+        sb.append(weight.toString()).append(",");
+      }
+      return sb.append("]").toString();
+    }
     
   }
 
diff --git a/lucene/core/src/java/org/apache/lucene/search/BooleanScorer.java b/lucene/core/src/java/org/apache/lucene/search/BooleanScorer.java
index c470290..db07798 100644
--- a/lucene/core/src/java/org/apache/lucene/search/BooleanScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/BooleanScorer.java
@@ -17,13 +17,13 @@
  * limitations under the License.
  */
 
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-
 import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.search.BooleanQuery.BooleanWeight;
+import org.apache.lucene.search.intervals.IntervalIterator;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.List;
 
 /* Description from Doug Cutting (excerpted from
  * LUCENE-1483):
@@ -134,6 +134,11 @@
     
     @Override
     public float score() { return (float)score; }
+
+    @Override
+    public IntervalIterator intervals(boolean collectIntervals) throws IOException {
+      throw new UnsupportedOperationException("Positions are not supported on out of order collections");
+    }
     
     @Override
     public long cost() { return 1; }
@@ -325,6 +330,11 @@
   }
 
   @Override
+  public IntervalIterator intervals(boolean collectIntervals) throws IOException {
+    throw new UnsupportedOperationException("intervals are not available if docs are matched out of order");
+  }
+
+  @Override
   public int freq() throws IOException {
     throw new UnsupportedOperationException();
   }
diff --git a/lucene/core/src/java/org/apache/lucene/search/BooleanScorer2.java b/lucene/core/src/java/org/apache/lucene/search/BooleanScorer2.java
index 85fa403..0a3801a 100644
--- a/lucene/core/src/java/org/apache/lucene/search/BooleanScorer2.java
+++ b/lucene/core/src/java/org/apache/lucene/search/BooleanScorer2.java
@@ -17,15 +17,15 @@
  * limitations under the License.
  */
 
+import org.apache.lucene.search.BooleanQuery.BooleanWeight;
+import org.apache.lucene.search.intervals.IntervalIterator;
+import org.apache.lucene.search.similarities.Similarity;
+
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
 
-import org.apache.lucene.search.BooleanClause.Occur;
-import org.apache.lucene.search.BooleanQuery.BooleanWeight;
-import org.apache.lucene.search.similarities.Similarity;
-
 /* See the description in BooleanScorer.java, comparing
  * BooleanScorer & BooleanScorer2 */
 
@@ -147,11 +147,42 @@
     public int advance(int target) throws IOException {
       return scorer.advance(target);
     }
+    
+    @Override
+    public IntervalIterator intervals(boolean collectIntervals) throws IOException {
+      return scorer.intervals(collectIntervals);
+    }
+
+    @Override
+    public int nextPosition() throws IOException {
+      return scorer.nextPosition();
+    }
+
+    @Override
+    public int startPosition() throws IOException {
+      return scorer.startPosition();
+    }
+
+    @Override
+    public int endPosition() throws IOException {
+      return scorer.endPosition();
+    }
+
+    @Override
+    public int startOffset() throws IOException {
+      return scorer.startOffset();
+    }
+
+    @Override
+    public int endOffset() throws IOException {
+      return scorer.endOffset();
+    }
 
     @Override
     public long cost() {
       return scorer.cost();
     }
+
   }
 
   private Scorer countingDisjunctionSumScorer(final List<Scorer> scorers,
@@ -169,7 +200,7 @@
       // we pass null for coord[] since we coordinate ourselves and override score()
       return new DisjunctionSumScorer(weight, scorers.toArray(new Scorer[scorers.size()]), null) {
         @Override 
-        public float score() throws IOException {
+        public float score() {
           coordinator.nrMatchers += super.nrMatchers;
           return (float) super.score;
         }
@@ -181,7 +212,7 @@
                                               List<Scorer> requiredScorers) throws IOException {
     // each scorer from the list counted as a single matcher
     final int requiredNrMatchers = requiredScorers.size();
-    return new ConjunctionScorer(weight, requiredScorers.toArray(new Scorer[requiredScorers.size()])) {
+    return new ConjunctionScorer(weight, requiredScorers) {
       private int lastScoredDoc = -1;
       // Save the score of lastScoredDoc, so that we don't compute it more than
       // once in score().
@@ -206,7 +237,7 @@
 
   private Scorer dualConjunctionSumScorer(boolean disableCoord,
                                                 Scorer req1, Scorer req2) throws IOException { // non counting.
-    return new ConjunctionScorer(weight, new Scorer[] { req1, req2 });
+    return new ConjunctionScorer(weight, new Scorer[] { req1, req2 }, 1f);
     // All scorers match, so defaultSimilarity always has 1 as
     // the coordination factor.
     // Therefore the sum of the scores of two scorers
@@ -249,7 +280,7 @@
       if (minNrShouldMatch > 0) { // use a required disjunction scorer over the optional scorers
         return addProhibitedScorers( 
                       dualConjunctionSumScorer( // non counting
-                              disableCoord,
+                              disableCoord, 
                               requiredCountingSumScorer,
                               countingDisjunctionSumScorer(
                                       optionalScorers,
@@ -284,7 +315,7 @@
    */
   @Override
   public void score(Collector collector) throws IOException {
-    collector.setScorer(this);
+    collector.setScorer(this);    
     while ((doc = countingSumScorer.nextDoc()) != NO_MORE_DOCS) {
       collector.collect(doc);
     }
@@ -327,13 +358,43 @@
   public int advance(int target) throws IOException {
     return doc = countingSumScorer.advance(target);
   }
-  
+
+  @Override
+  public IntervalIterator intervals(boolean collectIntervals) throws IOException {
+    return countingSumScorer.intervals(collectIntervals);
+  }
+
   @Override
   public long cost() {
     return countingSumScorer.cost();
   }
 
   @Override
+  public int nextPosition() throws IOException {
+    return countingSumScorer.nextPosition();
+  }
+
+  @Override
+  public int startPosition() throws IOException {
+    return countingSumScorer.startPosition();
+  }
+
+  @Override
+  public int endPosition() throws IOException {
+    return countingSumScorer.endPosition();
+  }
+
+  @Override
+  public int startOffset() throws IOException {
+    return countingSumScorer.startOffset();
+  }
+
+  @Override
+  public int endOffset() throws IOException {
+    return countingSumScorer.endOffset();
+  }
+
+  @Override
   public Collection<ChildScorer> getChildren() {
     ArrayList<ChildScorer> children = new ArrayList<ChildScorer>();
     for (Scorer s : optionalScorers) {
diff --git a/lucene/core/src/java/org/apache/lucene/search/CachingCollector.java b/lucene/core/src/java/org/apache/lucene/search/CachingCollector.java
index 554da52..9b1c4a3 100644
--- a/lucene/core/src/java/org/apache/lucene/search/CachingCollector.java
+++ b/lucene/core/src/java/org/apache/lucene/search/CachingCollector.java
@@ -18,6 +18,7 @@
  */
 
 import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.intervals.IntervalIterator;
 import org.apache.lucene.util.RamUsageEstimator;
 
 import java.io.IOException;
@@ -89,10 +90,14 @@
     
     @Override
     public final int nextDoc() { throw new UnsupportedOperationException(); }
+
+    @Override
+    public IntervalIterator intervals(boolean collectIntervals) throws IOException { throw new UnsupportedOperationException(); }
     
     @Override
     public long cost() { return 1; }
-    }
+
+  }
 
   // A CachingCollector which caches scores
   private static final class ScoreCachingCollector extends CachingCollector {
diff --git a/lucene/core/src/java/org/apache/lucene/search/Collector.java b/lucene/core/src/java/org/apache/lucene/search/Collector.java
index 312f507..a7fa0b5 100644
--- a/lucene/core/src/java/org/apache/lucene/search/Collector.java
+++ b/lucene/core/src/java/org/apache/lucene/search/Collector.java
@@ -21,6 +21,7 @@
 
 import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.IndexReaderContext;
+import org.apache.lucene.search.Weight.PostingFeatures;
 
 /**
  * <p>Expert: Collectors are primarily meant to be used to
@@ -176,4 +177,12 @@
    */
   public abstract boolean acceptsDocsOutOfOrder();
   
+  /**
+   * Returns the posting features required by this collector. Default value is
+   * {@link PostingFeatures#DOCS_AND_FREQS}.
+   */
+  public PostingFeatures postingFeatures() {
+    return PostingFeatures.DOCS_AND_FREQS;
+  }
+  
 }
diff --git a/lucene/core/src/java/org/apache/lucene/search/ConjunctionScorer.java b/lucene/core/src/java/org/apache/lucene/search/ConjunctionScorer.java
index 22476e7..02f1963 100644
--- a/lucene/core/src/java/org/apache/lucene/search/ConjunctionScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/ConjunctionScorer.java
@@ -17,126 +17,193 @@
  * limitations under the License.
  */
 
+import org.apache.lucene.search.intervals.ConjunctionIntervalIterator;
+import org.apache.lucene.search.intervals.IntervalIterator;
+import org.apache.lucene.util.ArrayUtil;
+
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Comparator;
 
-import org.apache.lucene.util.ArrayUtil;
-
 /** Scorer for conjunctions, sets of queries, all of which are required. */
 class ConjunctionScorer extends Scorer {
-  protected int lastDoc = -1;
-  protected final DocsAndFreqs[] docsAndFreqs;
-  private final DocsAndFreqs lead;
+  
+  private final Scorer[] scorersOrdered;
+  private final Scorer[] scorers;
+  private int lastDoc = -1;
   private final float coord;
+  final PositionQueue posQueue;
 
-  ConjunctionScorer(Weight weight, Scorer[] scorers) {
-    this(weight, scorers, 1f);
+  public ConjunctionScorer(Weight weight, Collection<Scorer> scorers) throws IOException {
+    this(weight, scorers.toArray(new Scorer[scorers.size()]), 1f);
   }
   
-  ConjunctionScorer(Weight weight, Scorer[] scorers, float coord) {
+  public ConjunctionScorer(Weight weight, Scorer[] scorers, float coord) throws IOException {
     super(weight);
+    scorersOrdered = new Scorer[scorers.length];
+    System.arraycopy(scorers, 0, scorersOrdered, 0, scorers.length);
+    this.scorers = scorers;
     this.coord = coord;
-    this.docsAndFreqs = new DocsAndFreqs[scorers.length];
+    posQueue = new PositionQueue(scorers);
+    
     for (int i = 0; i < scorers.length; i++) {
-      docsAndFreqs[i] = new DocsAndFreqs(scorers[i]);
+      if (scorers[i].nextDoc() == NO_MORE_DOCS) {
+        // If even one of the sub-scorers does not have any documents, this
+        // scorer should not attempt to do any more work.
+        lastDoc = NO_MORE_DOCS;
+        return;
+      }
     }
-    // Sort the array the first time to allow the least frequent DocsEnum to
-    // lead the matching.
-    ArrayUtil.timSort(docsAndFreqs, new Comparator<DocsAndFreqs>() {
+
+    // Sort the array the first time...
+    // We don't need to sort the array in any future calls because we know
+    // it will already start off sorted (all scorers on same doc).
+    
+    // Note that this comparator is not consistent with equals!
+    // Also we use mergeSort here to be stable (so order of Scoreres that
+    // match on first document keeps preserved):
+    ArrayUtil.timSort(scorers, new Comparator<Scorer>() { // sort the array
       @Override
-      public int compare(DocsAndFreqs o1, DocsAndFreqs o2) {
-        return Long.compare(o1.cost, o2.cost);
+      public int compare(Scorer o1, Scorer o2) {
+        return o1.docID() - o2.docID();
       }
     });
 
-    lead = docsAndFreqs[0]; // least frequent DocsEnum leads the intersection
-  }
+    // NOTE: doNext() must be called before the re-sorting of the array later on.
+    // The reason is this: assume there are 5 scorers, whose first docs are 1,
+    // 2, 3, 5, 5 respectively. Sorting (above) leaves the array as is. Calling
+    // doNext() here advances all the first scorers to 5 (or a larger doc ID
+    // they all agree on). 
+    // However, if we re-sort before doNext() is called, the order will be 5, 3,
+    // 2, 1, 5 and then doNext() will stop immediately, since the first scorer's
+    // docs equals the last one. So the invariant that after calling doNext() 
+    // all scorers are on the same doc ID is broken.
+    if (doNext() == NO_MORE_DOCS) {
+      // The scorers did not agree on any document.
+      lastDoc = NO_MORE_DOCS;
+      return;
+    }
 
-  private int doNext(int doc) throws IOException {
-    for(;;) {
-      // doc may already be NO_MORE_DOCS here, but we don't check explicitly
-      // since all scorers should advance to NO_MORE_DOCS, match, then
-      // return that value.
-      advanceHead: for(;;) {
-        for (int i = 1; i < docsAndFreqs.length; i++) {
-          // invariant: docsAndFreqs[i].doc <= doc at this point.
-
-          // docsAndFreqs[i].doc may already be equal to doc if we "broke advanceHead"
-          // on the previous iteration and the advance on the lead scorer exactly matched.
-          if (docsAndFreqs[i].doc < doc) {
-            docsAndFreqs[i].doc = docsAndFreqs[i].scorer.advance(doc);
-
-            if (docsAndFreqs[i].doc > doc) {
-              // DocsEnum beyond the current doc - break and advance lead to the new highest doc.
-              doc = docsAndFreqs[i].doc;
-              break advanceHead;
-            }
-          }
-        }
-        // success - all DocsEnums are on the same doc
-        return doc;
-      }
-      // advance head for next iteration
-      doc = lead.doc = lead.scorer.advance(doc);
+    // If first-time skip distance is any predictor of
+    // scorer sparseness, then we should always try to skip first on
+    // those scorers.
+    // Keep last scorer in it's last place (it will be the first
+    // to be skipped on), but reverse all of the others so that
+    // they will be skipped on in order of original high skip.
+    int end = scorers.length - 1;
+    int max = end >> 1;
+    for (int i = 0; i < max; i++) {
+      Scorer tmp = scorers[i];
+      int idx = end - i - 1;
+      scorers[i] = scorers[idx];
+      scorers[idx] = tmp;
     }
   }
 
+  private int doNext() throws IOException {
+    int first = 0;
+    int doc = scorers[scorers.length - 1].docID();
+    Scorer firstScorer;
+    while ((firstScorer = scorers[first]).docID() < doc) {
+      doc = firstScorer.advance(doc);
+      first = first == scorers.length - 1 ? 0 : first + 1;
+    }
+    posQueue.advanceTo(doc);
+    return doc;
+  }
+  
   @Override
   public int advance(int target) throws IOException {
-    lead.doc = lead.scorer.advance(target);
-    return lastDoc = doNext(lead.doc);
+    if (lastDoc == NO_MORE_DOCS) {
+      return lastDoc;
+    } else if (scorers[(scorers.length - 1)].docID() < target) {
+      scorers[(scorers.length - 1)].advance(target);
+    }
+    return lastDoc = doNext();
   }
 
   @Override
   public int docID() {
     return lastDoc;
   }
-
+  
   @Override
   public int nextDoc() throws IOException {
-    lead.doc = lead.scorer.nextDoc();
-    return lastDoc = doNext(lead.doc);
+    if (lastDoc == NO_MORE_DOCS) {
+      return lastDoc;
+    } else if (lastDoc == -1) {
+      return lastDoc = scorers[scorers.length - 1].docID();
+    }
+    scorers[(scorers.length - 1)].nextDoc();
+    return lastDoc = doNext();
   }
-
+  
   @Override
   public float score() throws IOException {
     // TODO: sum into a double and cast to float if we ever send required clauses to BS1
     float sum = 0.0f;
-    for (DocsAndFreqs docs : docsAndFreqs) {
-      sum += docs.scorer.score();
+    for (int i = 0; i < scorers.length; i++) {
+      sum += scorers[i].score();
     }
     return sum * coord;
   }
   
   @Override
-  public int freq() {
-    return docsAndFreqs.length;
+  public IntervalIterator intervals(boolean collectIntervals) throws IOException {
+    if (scorersOrdered == null) {
+      throw new IllegalStateException("no positions requested for this scorer");
+    }
+      // only created if needed for this scorer - no penalty for non-positional queries
+    return new ConjunctionIntervalIterator(this, collectIntervals, pullIterators(collectIntervals, scorersOrdered));
+  }
+
+
+  @Override
+  public int freq() throws IOException {
+    return scorers.length;
+  }
+
+  @Override
+  public int nextPosition() throws IOException {
+    return posQueue.nextPosition();
+  }
+
+  @Override
+  public int startPosition() throws IOException {
+    return posQueue.startPosition();
+  }
+
+  @Override
+  public int endPosition() throws IOException {
+    return posQueue.endPosition();
+  }
+
+  @Override
+  public int startOffset() throws IOException {
+    return posQueue.startOffset();
+  }
+
+  @Override
+  public int endOffset() throws IOException {
+    return posQueue.endOffset();
   }
 
   @Override
   public long cost() {
-    return lead.scorer.cost();
+    long sum = 0;
+    for (int i = 0; i < scorers.length; i++) {
+      sum += scorers[i].cost();
+    }
+    return sum; // nocommit is this right?
   }
 
   @Override
   public Collection<ChildScorer> getChildren() {
-    ArrayList<ChildScorer> children = new ArrayList<ChildScorer>(docsAndFreqs.length);
-    for (DocsAndFreqs docs : docsAndFreqs) {
-      children.add(new ChildScorer(docs.scorer, "MUST"));
+    ArrayList<ChildScorer> children = new ArrayList<ChildScorer>(scorers.length);
+    for (Scorer scorer : scorers) {
+      children.add(new ChildScorer(scorer, "MUST"));
     }
     return children;
   }
-
-  static final class DocsAndFreqs {
-    final long cost;
-    final Scorer scorer;
-    int doc = -1;
-   
-    DocsAndFreqs(Scorer scorer) {
-      this.scorer = scorer;
-      this.cost = scorer.cost();
-    }
-  }
 }
diff --git a/lucene/core/src/java/org/apache/lucene/search/ConstantScoreQuery.java b/lucene/core/src/java/org/apache/lucene/search/ConstantScoreQuery.java
index 6c4fe45..e48e49c 100644
--- a/lucene/core/src/java/org/apache/lucene/search/ConstantScoreQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/ConstantScoreQuery.java
@@ -20,6 +20,8 @@
 import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.Term;
+import org.apache.lucene.search.Weight.PostingFeatures;
+import org.apache.lucene.search.intervals.IntervalIterator;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.ToStringUtils;
 
@@ -124,7 +126,7 @@
 
     @Override
     public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder,
-        boolean topScorer, final Bits acceptDocs) throws IOException {
+        boolean topScorer, PostingFeatures flags, final Bits acceptDocs) throws IOException {
       final DocIdSetIterator disi;
       if (filter != null) {
         assert query == null;
@@ -135,7 +137,7 @@
         disi = dis.iterator();
       } else {
         assert query != null && innerWeight != null;
-        disi = innerWeight.scorer(context, scoreDocsInOrder, topScorer, acceptDocs);
+        disi = innerWeight.scorer(context, scoreDocsInOrder, topScorer, flags, acceptDocs);
       }
 
       if (disi == null) {
@@ -151,7 +153,7 @@
 
     @Override
     public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
-      final Scorer cs = scorer(context, true, false, context.reader().getLiveDocs());
+      final Scorer cs = scorer(context, true, false, PostingFeatures.DOCS_AND_FREQS, context.reader().getLiveDocs());
       final boolean exists = (cs != null && cs.advance(doc) == doc);
 
       final ComplexExplanation result = new ComplexExplanation();
@@ -205,7 +207,7 @@
     public int advance(int target) throws IOException {
       return docIdSetIterator.advance(target);
     }
-    
+
     @Override
     public long cost() {
       return docIdSetIterator.cost();
@@ -230,6 +232,11 @@
         }
         
         @Override
+        public PostingFeatures postingFeatures() {
+          return collector.postingFeatures();
+        }
+
+        @Override
         public boolean acceptsDocsOutOfOrder() {
           return collector.acceptsDocsOutOfOrder();
         }
@@ -255,6 +262,15 @@
         return super.score(collector, max, firstDocID);
       }
     }
+        
+    @Override
+    public IntervalIterator intervals(boolean collectIntervals) throws IOException {
+      if (docIdSetIterator instanceof Scorer) {
+        return ((Scorer) docIdSetIterator).intervals(collectIntervals);
+      } else {
+        throw new UnsupportedOperationException("positions are only supported on Scorer subclasses");
+      }
+    }
 
     @Override
     public Collection<ChildScorer> getChildren() {
diff --git a/lucene/core/src/java/org/apache/lucene/search/DisjunctionMaxQuery.java b/lucene/core/src/java/org/apache/lucene/search/DisjunctionMaxQuery.java
index f61a54f..2c353aa 100644
--- a/lucene/core/src/java/org/apache/lucene/search/DisjunctionMaxQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/DisjunctionMaxQuery.java
@@ -154,11 +154,11 @@
     /** Create the scorer used to score our associated DisjunctionMaxQuery */
     @Override
     public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder,
-        boolean topScorer, Bits acceptDocs) throws IOException {
+        boolean topScorer, PostingFeatures flags, Bits acceptDocs) throws IOException {
       List<Scorer> scorers = new ArrayList<Scorer>();
       for (Weight w : weights) {
         // we will advance() subscorers
-        Scorer subScorer = w.scorer(context, true, false, acceptDocs);
+        Scorer subScorer = w.scorer(context, true, false, flags, acceptDocs);
         if (subScorer != null) {
           scorers.add(subScorer);
 
diff --git a/lucene/core/src/java/org/apache/lucene/search/DisjunctionMaxScorer.java b/lucene/core/src/java/org/apache/lucene/search/DisjunctionMaxScorer.java
index 205e78e..31d6feb 100644
--- a/lucene/core/src/java/org/apache/lucene/search/DisjunctionMaxScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/DisjunctionMaxScorer.java
@@ -16,6 +16,9 @@
  * limitations under the License.
  */
 
+import org.apache.lucene.search.intervals.DisjunctionIntervalIterator;
+import org.apache.lucene.search.intervals.IntervalIterator;
+
 import java.io.IOException;
 
 /**
@@ -48,6 +51,7 @@
       Scorer[] subScorers) {
     super(weight, subScorers);
     this.tieBreakerMultiplier = tieBreakerMultiplier;
+        
   }
 
   /** Determine the current document score.  Initially invalid, until {@link #nextDoc()} is called the first time.
@@ -61,6 +65,7 @@
   @Override
   protected void afterNext() throws IOException {
     doc = subScorers[0].docID();
+    posQueue.advanceTo(doc);
     if (doc != NO_MORE_DOCS) {
       scoreSum = scoreMax = subScorers[0].score();
       freq = 1;
@@ -85,4 +90,9 @@
   public int freq() throws IOException {
     return freq;
   }
+  
+  @Override
+  public IntervalIterator intervals(boolean collectIntervals) throws IOException {
+    return new DisjunctionIntervalIterator(this, collectIntervals, pullIterators(collectIntervals, subScorers));
+  }
 }
diff --git a/lucene/core/src/java/org/apache/lucene/search/DisjunctionScorer.java b/lucene/core/src/java/org/apache/lucene/search/DisjunctionScorer.java
index 05522dd..7b8efd5 100644
--- a/lucene/core/src/java/org/apache/lucene/search/DisjunctionScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/DisjunctionScorer.java
@@ -20,6 +20,7 @@
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.Locale;
 
 /**
  * Base class for Scorers that score disjunctions.
@@ -30,11 +31,13 @@
   /** The document number of the current match. */
   protected int doc = -1;
   protected int numScorers;
+  protected PositionQueue posQueue;
   
   protected DisjunctionScorer(Weight weight, Scorer subScorers[]) {
     super(weight);
     this.subScorers = subScorers;
     this.numScorers = subScorers.length;
+    this.posQueue = new PositionQueue(subScorers);
     heapify();
   }
   
@@ -110,6 +113,45 @@
   }
 
   @Override
+  public int nextPosition() throws IOException {
+    //System.out.println("Advancing " + this.toString());
+    int pos = posQueue.nextPosition();
+    //System.out.println(this);
+    return pos;
+  }
+
+  @Override
+  public int startPosition() throws IOException {
+    return posQueue.startPosition();
+  }
+
+  @Override
+  public int endPosition() throws IOException {
+    return posQueue.endPosition();
+  }
+
+  @Override
+  public int startOffset() throws IOException {
+    return posQueue.startOffset();
+  }
+
+  @Override
+  public int endOffset() throws IOException {
+    return posQueue.endOffset();
+  }
+
+  @Override
+  public String toString() {
+    try {
+      return String.format(Locale.ROOT, "DisjScorer[%s] %d(%d)->%d(%d)", weight.toString(),
+                            posQueue.startPosition(),
+                            posQueue.startOffset(), posQueue.endPosition(), posQueue.endOffset());
+    } catch (IOException e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  @Override
   public long cost() {
     long sum = 0;
     for (int i = 0; i < numScorers; i++) {
@@ -172,4 +214,5 @@
    */
   // TODO: make this less horrible
   protected abstract void afterNext() throws IOException;
+
 }
diff --git a/lucene/core/src/java/org/apache/lucene/search/DisjunctionSumScorer.java b/lucene/core/src/java/org/apache/lucene/search/DisjunctionSumScorer.java
index 49a0675..5664236 100644
--- a/lucene/core/src/java/org/apache/lucene/search/DisjunctionSumScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/DisjunctionSumScorer.java
@@ -17,7 +17,13 @@
  * limitations under the License.
  */
 
+import org.apache.lucene.search.intervals.ConjunctionIntervalIterator;
+import org.apache.lucene.search.intervals.DisjunctionIntervalIterator;
+import org.apache.lucene.search.intervals.IntervalIterator;
+
 import java.io.IOException;
+import java.util.List;
+
 
 /** A Scorer for OR like queries, counterpart of <code>ConjunctionScorer</code>.
  * This Scorer implements {@link Scorer#advance(int)} and uses advance() on the given Scorers. 
@@ -29,7 +35,7 @@
 
   protected double score = Float.NaN;
   private final float[] coord;
-  
+
   /** Construct a <code>DisjunctionScorer</code>.
    * @param weight The weight to be used.
    * @param subScorers Array of at least two subscorers.
@@ -37,17 +43,16 @@
    */
   DisjunctionSumScorer(Weight weight, Scorer[] subScorers, float[] coord) throws IOException {
     super(weight, subScorers);
-
     if (numScorers <= 1) {
       throw new IllegalArgumentException("There must be at least 2 subScorers");
     }
     this.coord = coord;
   }
-  
-  @Override
+
   protected void afterNext() throws IOException {
     final Scorer sub = subScorers[0];
     doc = sub.docID();
+    posQueue.advanceTo(doc);
     if (doc != NO_MORE_DOCS) {
       score = sub.score();
       nrMatchers = 1;
@@ -81,4 +86,9 @@
   public int freq() throws IOException {
     return nrMatchers;
   }
+  
+  @Override
+  public IntervalIterator intervals(boolean collectIntervals) throws IOException {
+    return new DisjunctionIntervalIterator(this, collectIntervals, pullIterators(collectIntervals, subScorers));
+  }
 }
diff --git a/lucene/core/src/java/org/apache/lucene/search/ExactPhraseScorer.java b/lucene/core/src/java/org/apache/lucene/search/ExactPhraseScorer.java
index 909cfe0..981e9e6 100644
--- a/lucene/core/src/java/org/apache/lucene/search/ExactPhraseScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/ExactPhraseScorer.java
@@ -17,52 +17,59 @@
  * limitations under the License.
  */
 
+import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.search.PhraseQuery.TermDocsEnumFactory;
+import org.apache.lucene.search.intervals.BlockIntervalIterator;
+import org.apache.lucene.search.intervals.IntervalIterator;
+import org.apache.lucene.search.intervals.TermIntervalIterator;
+import org.apache.lucene.search.similarities.Similarity;
+
 import java.io.IOException;
 import java.util.Arrays;
 
-import org.apache.lucene.index.*;
-import org.apache.lucene.search.similarities.Similarity;
-
 final class ExactPhraseScorer extends Scorer {
   private final int endMinus1;
-
+  
   private final static int CHUNK = 4096;
-
+  
   private int gen;
   private final int[] counts = new int[CHUNK];
   private final int[] gens = new int[CHUNK];
-
+  
   boolean noDocs;
   private final long cost;
 
   private final static class ChunkState {
-    final DocsAndPositionsEnum posEnum;
+    final TermDocsEnumFactory factory;
+    final DocsEnum posEnum;
     final int offset;
     final boolean useAdvance;
     int posUpto;
     int posLimit;
     int pos;
     int lastPos;
-
-    public ChunkState(DocsAndPositionsEnum posEnum, int offset, boolean useAdvance) {
+    
+    public ChunkState(TermDocsEnumFactory factory, DocsEnum posEnum, int offset,
+        boolean useAdvance) throws IOException {
+      this.factory = factory;
       this.posEnum = posEnum;
       this.offset = offset;
       this.useAdvance = useAdvance;
     }
   }
-
+  
   private final ChunkState[] chunkStates;
-
+  
   private int docID = -1;
   private int freq;
-
-  private final Similarity.SimScorer docScorer;
   
+  private final Similarity.SimScorer docScorer;
+
   ExactPhraseScorer(Weight weight, PhraseQuery.PostingsAndFreq[] postings,
                     Similarity.SimScorer docScorer) throws IOException {
     super(weight);
     this.docScorer = docScorer;
-
+    
     chunkStates = new ChunkState[postings.length];
 
     endMinus1 = postings.length-1;
@@ -76,31 +83,33 @@
       // costly, so, if the relative freq of the 2nd
       // rarest term is not that much (> 1/5th) rarer than
       // the first term, then we just use .nextDoc() when
-      // ANDing.  This buys ~15% gain for phrases where
+      // ANDing. This buys ~15% gain for phrases where
       // freq of rarest 2 terms is close:
-      final boolean useAdvance = postings[i].docFreq > 5*postings[0].docFreq;
-      chunkStates[i] = new ChunkState(postings[i].postings, -postings[i].position, useAdvance);
-      if (i > 0 && postings[i].postings.nextDoc() == DocIdSetIterator.NO_MORE_DOCS) {
+      final boolean useAdvance = postings[i].docFreq > 5 * postings[0].docFreq;
+      chunkStates[i] = new ChunkState(postings[i].factory, postings[i].postings,
+          -postings[i].position, useAdvance);
+      if (i > 0
+          && postings[i].postings.nextDoc() == DocIdSetIterator.NO_MORE_DOCS) {
         noDocs = true;
         return;
       }
     }
   }
-
+  
   @Override
   public int nextDoc() throws IOException {
-    while(true) {
-
+    while (true) {
+      
       // first (rarest) term
       final int doc = chunkStates[0].posEnum.nextDoc();
       if (doc == DocIdSetIterator.NO_MORE_DOCS) {
         docID = doc;
         return doc;
       }
-
+      
       // not-first terms
       int i = 1;
-      while(i < chunkStates.length) {
+      while (i < chunkStates.length) {
         final ChunkState cs = chunkStates[i];
         int doc2 = cs.posEnum.docID();
         if (cs.useAdvance) {
@@ -109,7 +118,7 @@
           }
         } else {
           int iter = 0;
-          while(doc2 < doc) {
+          while (doc2 < doc) {
             // safety net -- fallback to .advance if we've
             // done too many .nextDocs
             if (++iter == 50) {
@@ -125,12 +134,12 @@
         }
         i++;
       }
-
+      
       if (i == chunkStates.length) {
         // this doc has all the terms -- now test whether
         // phrase occurs
         docID = doc;
-
+        
         freq = phraseFreq();
         if (freq != 0) {
           return docID;
@@ -138,22 +147,22 @@
       }
     }
   }
-
+  
   @Override
   public int advance(int target) throws IOException {
-
+    
     // first term
     int doc = chunkStates[0].posEnum.advance(target);
     if (doc == DocIdSetIterator.NO_MORE_DOCS) {
       docID = DocIdSetIterator.NO_MORE_DOCS;
       return doc;
     }
-
-    while(true) {
+    
+    while (true) {
       
       // not-first terms
       int i = 1;
-      while(i < chunkStates.length) {
+      while (i < chunkStates.length) {
         int doc2 = chunkStates[i].posEnum.docID();
         if (doc2 < doc) {
           doc2 = chunkStates[i].posEnum.advance(doc);
@@ -163,7 +172,7 @@
         }
         i++;
       }
-
+      
       if (i == chunkStates.length) {
         // this doc has all the terms -- now test whether
         // phrase occurs
@@ -173,7 +182,7 @@
           return docID;
         }
       }
-
+      
       doc = chunkStates[0].posEnum.nextDoc();
       if (doc == DocIdSetIterator.NO_MORE_DOCS) {
         docID = doc;
@@ -181,63 +190,63 @@
       }
     }
   }
-
+  
   @Override
   public String toString() {
     return "ExactPhraseScorer(" + weight + ")";
   }
-
+  
   @Override
   public int freq() {
     return freq;
   }
-
+  
   @Override
   public int docID() {
     return docID;
   }
-
+  
   @Override
   public float score() {
     return docScorer.score(docID, freq);
   }
-
+  
   private int phraseFreq() throws IOException {
-
+    
     freq = 0;
-
+    
     // init chunks
-    for(int i=0;i<chunkStates.length;i++) {
+    for (int i = 0; i < chunkStates.length; i++) {
       final ChunkState cs = chunkStates[i];
       cs.posLimit = cs.posEnum.freq();
       cs.pos = cs.offset + cs.posEnum.nextPosition();
       cs.posUpto = 1;
       cs.lastPos = -1;
     }
-
+    
     int chunkStart = 0;
     int chunkEnd = CHUNK;
-
+    
     // process chunk by chunk
     boolean end = false;
-
+    
     // TODO: we could fold in chunkStart into offset and
     // save one subtract per pos incr
-
-    while(!end) {
-
+    
+    while (!end) {
+      
       gen++;
-
+      
       if (gen == 0) {
         // wraparound
         Arrays.fill(gens, 0);
         gen++;
       }
-
+      
       // first term
       {
         final ChunkState cs = chunkStates[0];
-        while(cs.pos < chunkEnd) {
+        while (cs.pos < chunkEnd) {
           if (cs.pos > cs.lastPos) {
             cs.lastPos = cs.pos;
             final int posIndex = cs.pos - chunkStart;
@@ -245,7 +254,7 @@
             assert gens[posIndex] != gen;
             gens[posIndex] = gen;
           }
-
+          
           if (cs.posUpto == cs.posLimit) {
             end = true;
             break;
@@ -254,13 +263,13 @@
           cs.pos = cs.offset + cs.posEnum.nextPosition();
         }
       }
-
+      
       // middle terms
       boolean any = true;
-      for(int t=1;t<endMinus1;t++) {
+      for (int t = 1; t < endMinus1; t++) {
         final ChunkState cs = chunkStates[t];
         any = false;
-        while(cs.pos < chunkEnd) {
+        while (cs.pos < chunkEnd) {
           if (cs.pos > cs.lastPos) {
             cs.lastPos = cs.pos;
             final int posIndex = cs.pos - chunkStart;
@@ -270,7 +279,7 @@
               any = true;
             }
           }
-
+          
           if (cs.posUpto == cs.posLimit) {
             end = true;
             break;
@@ -278,32 +287,33 @@
           cs.posUpto++;
           cs.pos = cs.offset + cs.posEnum.nextPosition();
         }
-
+        
         if (!any) {
           break;
         }
       }
-
+      
       if (!any) {
         // petered out for this chunk
         chunkStart += CHUNK;
         chunkEnd += CHUNK;
         continue;
       }
-
+      
       // last term
-
+      
       {
         final ChunkState cs = chunkStates[endMinus1];
-        while(cs.pos < chunkEnd) {
+        while (cs.pos < chunkEnd) {
           if (cs.pos > cs.lastPos) {
             cs.lastPos = cs.pos;
             final int posIndex = cs.pos - chunkStart;
-            if (posIndex >= 0 && gens[posIndex] == gen && counts[posIndex] == endMinus1) {
+            if (posIndex >= 0 && gens[posIndex] == gen
+                && counts[posIndex] == endMinus1) {
               freq++;
             }
           }
-
+          
           if (cs.posUpto == cs.posLimit) {
             end = true;
             break;
@@ -312,16 +322,27 @@
           cs.pos = cs.offset + cs.posEnum.nextPosition();
         }
       }
-
+      
       chunkStart += CHUNK;
       chunkEnd += CHUNK;
     }
-
+    
     return freq;
   }
 
   @Override
+  public IntervalIterator intervals(boolean collectIntervals) throws IOException {
+    TermIntervalIterator[] posIters = new TermIntervalIterator[chunkStates.length];
+    DocsEnum[] enums = new DocsEnum[chunkStates.length];
+    for (int i = 0; i < chunkStates.length; i++) {
+      posIters[i] = new TermIntervalIterator(this, enums[i] = chunkStates[i].factory.docsAndPositionsEnum(), false, collectIntervals);
+    }
+    return new SloppyPhraseScorer.AdvancingIntervalIterator(this, collectIntervals, enums, new BlockIntervalIterator(this, collectIntervals, posIters));
+  }
+
+  @Override
   public long cost() {
     return cost;
   }
+
 }
diff --git a/lucene/core/src/java/org/apache/lucene/search/FieldCacheImpl.java b/lucene/core/src/java/org/apache/lucene/search/FieldCacheImpl.java
index fcafbbe..1df799c 100644
--- a/lucene/core/src/java/org/apache/lucene/search/FieldCacheImpl.java
+++ b/lucene/core/src/java/org/apache/lucene/search/FieldCacheImpl.java
@@ -17,14 +17,6 @@
  * limitations under the License.
  */
 
-import java.io.IOException;
-import java.io.PrintStream;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.WeakHashMap;
-
 import org.apache.lucene.index.AtomicReader;
 import org.apache.lucene.index.BinaryDocValues;
 import org.apache.lucene.index.DocTermOrds;
@@ -47,6 +39,14 @@
 import org.apache.lucene.util.packed.MonotonicAppendingLongBuffer;
 import org.apache.lucene.util.packed.PackedInts;
 
+import java.io.IOException;
+import java.io.PrintStream;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.WeakHashMap;
+
 /**
  * Expert: The default cache implementation, storing all values in memory.
  * A WeakHashMap is used for storage.
@@ -925,6 +925,7 @@
       if (ord < 0) {
         throw new IllegalArgumentException("ord must be >=0 (got ord=" + ord + ")");
       }
+
       bytes.fill(ret, termOrdToBytesOffset.get(ord));
     }
   }
diff --git a/lucene/core/src/java/org/apache/lucene/search/FilteredQuery.java b/lucene/core/src/java/org/apache/lucene/search/FilteredQuery.java
index bf2fdde..7c41a5d 100644
--- a/lucene/core/src/java/org/apache/lucene/search/FilteredQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/FilteredQuery.java
@@ -20,6 +20,8 @@
 import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.Term;
+import org.apache.lucene.search.Weight.PostingFeatures;
+import org.apache.lucene.search.intervals.IntervalIterator;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.ToStringUtils;
 
@@ -122,7 +124,7 @@
 
       // return a filtering scorer
       @Override
-      public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder, boolean topScorer, final Bits acceptDocs) throws IOException {
+      public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder, boolean topScorer, PostingFeatures flags, Bits acceptDocs) throws IOException {
         assert filter != null;
 
         final DocIdSet filterDocIdSet = filter.getDocIdSet(context, acceptDocs);
@@ -130,7 +132,7 @@
           // this means the filter does not accept any documents.
           return null;
         }
-        return strategy.filteredScorer(context, scoreDocsInOrder, topScorer, weight, filterDocIdSet);
+        return strategy.filteredScorer(context, scoreDocsInOrder, topScorer, weight, filterDocIdSet, flags);
         
       }
     };
@@ -192,7 +194,6 @@
       }
       
     }
-
     @Override
     public int docID() {
       return scorerDoc;
@@ -212,6 +213,12 @@
     }
 
     @Override
+    public IntervalIterator intervals(boolean collectIntervals)
+        throws IOException {
+      return scorer.intervals(collectIntervals);
+    }
+
+    @Override
     public long cost() {
       return scorer.cost();
     }
@@ -311,6 +318,12 @@
     }
 
     @Override
+    public IntervalIterator intervals(boolean collectIntervals)
+        throws IOException {
+      return scorer.intervals(collectIntervals);
+    }
+
+    @Override
     public long cost() {
       return Math.min(primary.cost(), secondary.cost());
     }
@@ -493,13 +506,14 @@
      *          be called.
      * @param weight the {@link FilteredQuery} {@link Weight} to create the filtered scorer.
      * @param docIdSet the filter {@link DocIdSet} to apply
+     * @param flags the low level {@link PostingFeatures} for this scorer.
      * @return a filtered scorer
      * 
      * @throws IOException if an {@link IOException} occurs
      */
     public abstract Scorer filteredScorer(AtomicReaderContext context,
         boolean scoreDocsInOrder, boolean topScorer, Weight weight,
-        DocIdSet docIdSet) throws IOException;
+        DocIdSet docIdSet, PostingFeatures flags) throws IOException;
   }
   
   /**
@@ -513,7 +527,7 @@
   public static class RandomAccessFilterStrategy extends FilterStrategy {
 
     @Override
-    public Scorer filteredScorer(AtomicReaderContext context, boolean scoreDocsInOrder, boolean topScorer, Weight weight, DocIdSet docIdSet) throws IOException {
+    public Scorer filteredScorer(AtomicReaderContext context, boolean scoreDocsInOrder, boolean topScorer, Weight weight, DocIdSet docIdSet, PostingFeatures flags) throws IOException {
       final DocIdSetIterator filterIter = docIdSet.iterator();
       if (filterIter == null) {
         // this means the filter does not accept any documents.
@@ -530,12 +544,12 @@
       final boolean useRandomAccess = (filterAcceptDocs != null && (useRandomAccess(filterAcceptDocs, firstFilterDoc)));
       if (useRandomAccess) {
         // if we are using random access, we return the inner scorer, just with other acceptDocs
-        return weight.scorer(context, scoreDocsInOrder, topScorer, filterAcceptDocs);
+        return weight.scorer(context, scoreDocsInOrder, topScorer, flags, filterAcceptDocs);
       } else {
         assert firstFilterDoc > -1;
         // we are gonna advance() this scorer, so we set inorder=true/toplevel=false
         // we pass null as acceptDocs, as our filter has already respected acceptDocs, no need to do twice
-        final Scorer scorer = weight.scorer(context, true, false, null);
+        final Scorer scorer = weight.scorer(context, true, false, flags, null);
         // TODO once we have way to figure out if we use RA or LeapFrog we can remove this scorer
         return (scorer == null) ? null : new PrimaryAdvancedLeapFrogScorer(weight, firstFilterDoc, filterIter, scorer);
       }
@@ -569,7 +583,7 @@
     @Override
     public Scorer filteredScorer(AtomicReaderContext context,
         boolean scoreDocsInOrder, boolean topScorer, Weight weight,
-        DocIdSet docIdSet) throws IOException {
+        DocIdSet docIdSet, PostingFeatures flags) throws IOException {
       final DocIdSetIterator filterIter = docIdSet.iterator();
       if (filterIter == null) {
         // this means the filter does not accept any documents.
@@ -577,7 +591,7 @@
       }
       // we are gonna advance() this scorer, so we set inorder=true/toplevel=false
       // we pass null as acceptDocs, as our filter has already respected acceptDocs, no need to do twice
-      final Scorer scorer = weight.scorer(context, true, false, null);
+      final Scorer scorer = weight.scorer(context, true, false, flags, null);
       if (scorerFirst) {
         return (scorer == null) ? null : new LeapFrogScorer(weight, scorer, filterIter, scorer);  
       } else {
@@ -603,13 +617,13 @@
   private static final class QueryFirstFilterStrategy extends FilterStrategy {
     @Override
     public Scorer filteredScorer(final AtomicReaderContext context,
-        boolean scoreDocsInOrder, boolean topScorer, Weight weight,
-        DocIdSet docIdSet) throws IOException {
+        boolean scoreDocsInOrder, boolean topScorer, Weight weight, 
+        DocIdSet docIdSet, PostingFeatures flags) throws IOException {
       Bits filterAcceptDocs = docIdSet.bits();
       if (filterAcceptDocs == null) {
-        return LEAP_FROG_QUERY_FIRST_STRATEGY.filteredScorer(context, scoreDocsInOrder, topScorer, weight, docIdSet);
+        return LEAP_FROG_QUERY_FIRST_STRATEGY.filteredScorer(context, scoreDocsInOrder, topScorer, weight, docIdSet, flags);
       }
-      final Scorer scorer = weight.scorer(context, true, false, null);
+      final Scorer scorer = weight.scorer(context, true, false, flags, null);
       return scorer == null ? null : new QueryFirstScorer(weight,
           filterAcceptDocs, scorer);
     }
diff --git a/lucene/core/src/java/org/apache/lucene/search/FuzzyTermsEnum.java b/lucene/core/src/java/org/apache/lucene/search/FuzzyTermsEnum.java
index 8e2bf8b..43e9b53 100644
--- a/lucene/core/src/java/org/apache/lucene/search/FuzzyTermsEnum.java
+++ b/lucene/core/src/java/org/apache/lucene/search/FuzzyTermsEnum.java
@@ -22,7 +22,7 @@
 import java.util.Comparator;
 import java.util.List;
 
-import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.index.DocsEnum;
 import org.apache.lucene.index.DocsEnum;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.TermState;
@@ -277,8 +277,8 @@
   }
   
   @Override
-  public DocsAndPositionsEnum docsAndPositions(Bits liveDocs,
-                                               DocsAndPositionsEnum reuse, int flags) throws IOException {
+  public DocsEnum docsAndPositions(Bits liveDocs,
+                                               DocsEnum reuse, int flags) throws IOException {
     return actualEnum.docsAndPositions(liveDocs, reuse, flags);
   }
   
diff --git a/lucene/core/src/java/org/apache/lucene/search/IndexSearcher.java b/lucene/core/src/java/org/apache/lucene/search/IndexSearcher.java
index 90310e8..be15c21 100644
--- a/lucene/core/src/java/org/apache/lucene/search/IndexSearcher.java
+++ b/lucene/core/src/java/org/apache/lucene/search/IndexSearcher.java
@@ -43,6 +43,7 @@
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.TermContext;
 import org.apache.lucene.index.Terms;
+import org.apache.lucene.search.intervals.IntervalIterator;
 import org.apache.lucene.search.similarities.DefaultSimilarity;
 import org.apache.lucene.search.similarities.Similarity;
 import org.apache.lucene.store.NIOFSDirectory;    // javadoc
@@ -607,7 +608,7 @@
         // continue with the following leaf
         continue;
       }
-      Scorer scorer = weight.scorer(ctx, !collector.acceptsDocsOutOfOrder(), true, ctx.reader().getLiveDocs());
+      Scorer scorer = weight.scorer(ctx, !collector.acceptsDocsOutOfOrder(), true, collector.postingFeatures(), ctx.reader().getLiveDocs());
       if (scorer != null) {
         try {
           scorer.score(collector);
@@ -802,6 +803,11 @@
       }
 
       @Override
+      public IntervalIterator intervals(boolean collectIntervals) throws IOException {
+        return null;
+      }
+
+      @Override
       public long cost() {
         return 1;
       }
diff --git a/lucene/core/src/java/org/apache/lucene/search/MatchAllDocsQuery.java b/lucene/core/src/java/org/apache/lucene/search/MatchAllDocsQuery.java
index 0737a7c..497762d 100644
--- a/lucene/core/src/java/org/apache/lucene/search/MatchAllDocsQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/MatchAllDocsQuery.java
@@ -20,6 +20,7 @@
 import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.Term;
+import org.apache.lucene.search.intervals.IntervalIterator;
 import org.apache.lucene.util.ToStringUtils;
 import org.apache.lucene.util.Bits;
 
@@ -79,6 +80,11 @@
     }
 
     @Override
+    public IntervalIterator intervals(boolean collectIntervals) throws IOException {
+      throw new UnsupportedOperationException("MatchAllDocsQuery doesn't support IntervalIterators");
+    }
+
+    @Override
     public long cost() {
       return maxDoc;
     }
@@ -115,7 +121,7 @@
 
     @Override
     public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder,
-        boolean topScorer, Bits acceptDocs) throws IOException {
+        boolean topScorer, PostingFeatures flags, Bits acceptDocs) throws IOException {
       return new MatchAllScorer(context.reader(), acceptDocs, this, queryWeight);
     }
 
diff --git a/lucene/core/src/java/org/apache/lucene/search/MultiPhraseQuery.java b/lucene/core/src/java/org/apache/lucene/search/MultiPhraseQuery.java
index c46ec48..d13feaf 100644
--- a/lucene/core/src/java/org/apache/lucene/search/MultiPhraseQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/MultiPhraseQuery.java
@@ -1,6 +1,6 @@
 package org.apache.lucene.search;
 
-/*
+/**
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.
@@ -17,12 +17,8 @@
  * limitations under the License.
  */
 
-import java.io.IOException;
-import java.util.*;
-
-import org.apache.lucene.index.AtomicReaderContext;
-import org.apache.lucene.index.DocsAndPositionsEnum;
 import org.apache.lucene.index.AtomicReader;
+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.DocsEnum;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexReaderContext;
@@ -31,14 +27,30 @@
 import org.apache.lucene.index.TermState;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
-import org.apache.lucene.search.similarities.Similarity.SimScorer;
+import org.apache.lucene.search.PhraseQuery.TermDocsEnumFactory;
+import org.apache.lucene.search.Weight.PostingFeatures;
 import org.apache.lucene.search.similarities.Similarity;
+
+import org.apache.lucene.search.similarities.Similarity.SimScorer;
 import org.apache.lucene.util.ArrayUtil;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.IntroSorter;
 import org.apache.lucene.util.PriorityQueue;
 import org.apache.lucene.util.ToStringUtils;
 
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.ListIterator;
+import java.util.Map;
+import java.util.Set;
+
 /**
  * MultiPhraseQuery is a generalized version of PhraseQuery, with an added
  * method {@link #add(Term[])}.
@@ -175,7 +187,7 @@
 
     @Override
     public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder,
-        boolean topScorer, Bits acceptDocs) throws IOException {
+        boolean topScorer, PostingFeatures flags, Bits acceptDocs) throws IOException {
       assert !termArrays.isEmpty();
       final AtomicReader reader = context.reader();
       final Bits liveDocs = acceptDocs;
@@ -193,9 +205,9 @@
       for (int pos=0; pos<postingsFreqs.length; pos++) {
         Term[] terms = termArrays.get(pos);
 
-        final DocsAndPositionsEnum postingsEnum;
+        final DocsEnum postingsEnum;
         int docFreq;
-
+        TermDocsEnumFactory factory;
         if (terms.length > 1) {
           postingsEnum = new UnionDocsAndPositionsEnum(liveDocs, context, terms, termContexts, termsEnum);
 
@@ -217,6 +229,7 @@
             // None of the terms are in this reader
             return null;
           }
+          factory = new MultiTermDocsEnumFactory(liveDocs, context, terms, termContexts, termsEnum, flags);
         } else {
           final Term term = terms[0];
           TermState termState = termContexts.get(term).get(context.ord);
@@ -233,10 +246,10 @@
             throw new IllegalStateException("field \"" + term.field() + "\" was indexed without position data; cannot run PhraseQuery (term=" + term.text() + ")");
           }
 
-          docFreq = termsEnum.docFreq();
+          factory = new TermDocsEnumFactory(term.bytes(), termState, termsEnum, flags, acceptDocs);
         }
-
-        postingsFreqs[pos] = new PhraseQuery.PostingsAndFreq(postingsEnum, docFreq, positions.get(pos).intValue(), terms);
+        
+        postingsFreqs[pos] = new PhraseQuery.PostingsAndFreq(postingsEnum, factory, termsEnum.docFreq() , positions.get(pos).intValue(), terms);
       }
 
       // sort by increasing docFreq order
@@ -258,7 +271,7 @@
 
     @Override
     public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
-      Scorer scorer = scorer(context, true, false, context.reader().getLiveDocs());
+      Scorer scorer = scorer(context, true, false, PostingFeatures.POSITIONS, context.reader().getLiveDocs());
       if (scorer != null) {
         int newDoc = scorer.advance(doc);
         if (newDoc == doc) {
@@ -402,6 +415,27 @@
     }
     return true;
   }
+
+  private static class MultiTermDocsEnumFactory extends TermDocsEnumFactory {
+
+    AtomicReaderContext context;
+    Term[] terms;
+    Map<Term, TermContext> termContexts;
+
+    MultiTermDocsEnumFactory(Bits liveDocs, AtomicReaderContext context, Term[] terms,
+                             Map<Term,TermContext> termContexts, TermsEnum termsEnum, PostingFeatures flags) throws IOException {
+      super(termsEnum, flags, liveDocs);
+      this.context = context;
+      this.terms = terms;
+      this.termContexts = termContexts;
+    }
+
+    @Override
+    public DocsEnum docsAndPositionsEnum() throws IOException {
+      return new UnionDocsAndPositionsEnum(liveDocs, context, terms, termContexts, termsEnum, flags);
+    }
+
+  }
 }
 
 /**
@@ -409,15 +443,15 @@
  */
 
 // TODO: if ever we allow subclassing of the *PhraseScorer
-class UnionDocsAndPositionsEnum extends DocsAndPositionsEnum {
+class UnionDocsAndPositionsEnum extends DocsEnum {
 
-  private static final class DocsQueue extends PriorityQueue<DocsAndPositionsEnum> {
-    DocsQueue(List<DocsAndPositionsEnum> docsEnums) throws IOException {
+  private static final class DocsQueue extends PriorityQueue<DocsEnum> {
+    DocsQueue(List<DocsEnum> docsEnums) throws IOException {
       super(docsEnums.size());
 
-      Iterator<DocsAndPositionsEnum> i = docsEnums.iterator();
+      Iterator<DocsEnum> i = docsEnums.iterator();
       while (i.hasNext()) {
-        DocsAndPositionsEnum postings = i.next();
+        DocsEnum postings = i.next();
         if (postings.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
           add(postings);
         }
@@ -425,30 +459,46 @@
     }
 
     @Override
-    public final boolean lessThan(DocsAndPositionsEnum a, DocsAndPositionsEnum b) {
+    public final boolean lessThan(DocsEnum a, DocsEnum b) {
       return a.docID() < b.docID();
     }
   }
 
-  private static final class IntQueue {
-    private int _arraySize = 16;
+  // TODO: Reimplement this as int[_arraySize * 3], storing position at i * 3,
+  // startOffset at i * 3 + 1 and endOffset at i * 3 + 2.  Will need to also
+  // implement a new SorterTemplate to sort the array.
+
+  private static final class PositionQueue {
+    private int _arraySize = 48;
     private int _index = 0;
     private int _lastIndex = 0;
     private int[] _array = new int[_arraySize];
     
-    final void add(int i) {
-      if (_lastIndex == _arraySize)
+    final void add(int pos, int start, int end) {
+      if (_lastIndex * 3 == _arraySize)
         growArray();
 
-      _array[_lastIndex++] = i;
+      _array[_lastIndex * 3] = pos;
+      _array[_lastIndex * 3 + 1] = start;
+      _array[_lastIndex * 3 + 2] = end;
+      _lastIndex += 1;
     }
 
     final int next() {
-      return _array[_index++];
+      return _array[_index++ * 3];
+    }
+
+    final int startOffset() {
+      return _array[(_index - 1) * 3 + 1];
+    }
+
+    final int endOffset() {
+      return _array[(_index - 1) * 3 + 2];
     }
 
     final void sort() {
-      Arrays.sort(_array, _index, _lastIndex);
+      //Arrays.sort(_array, _index, _lastIndex);
+      sorter.sort(_index, _lastIndex - 1);
     }
 
     final void clear() {
@@ -466,16 +516,55 @@
       _array = newArray;
       _arraySize *= 2;
     }
+
+    private IntroSorter sorter = new IntroSorter() {
+      private int pivot;
+
+      @Override
+      protected void swap(int i, int j) {
+        int ti = _array[i * 3];
+        int ts = _array[i * 3 + 1];
+        int te = _array[i * 3 + 2];
+        _array[i * 3] = _array[j * 3];
+        _array[i * 3 + 1] = _array[j * 3 + 1];
+        _array[i * 3 + 2] = _array[j * 3 + 2];
+        _array[j * 3] = ti;
+        _array[j * 3 + 1] = ts;
+        _array[j * 3 + 2] = te;
+      }
+
+      @Override
+      protected int compare(int i, int j) {
+        return _array[i * 3] - _array[j * 3];
+      }
+
+      @Override
+      protected void setPivot(int i) {
+        pivot = i;
+      }
+
+      @Override
+      protected int comparePivot(int j) {
+        return pivot - _array[j * 3];
+      }
+    };
   }
 
   private int _doc;
   private int _freq;
   private DocsQueue _queue;
-  private IntQueue _posList;
+  private PositionQueue _posList;
+  private int posPending;
   private long cost;
 
-  public UnionDocsAndPositionsEnum(Bits liveDocs, AtomicReaderContext context, Term[] terms, Map<Term,TermContext> termContexts, TermsEnum termsEnum) throws IOException {
-    List<DocsAndPositionsEnum> docsEnums = new LinkedList<DocsAndPositionsEnum>();
+  public UnionDocsAndPositionsEnum(Bits liveDocs, AtomicReaderContext context, Term[] terms,
+                                   Map<Term,TermContext> termContexts, TermsEnum termsEnum) throws IOException {
+    this(liveDocs, context, terms, termContexts, termsEnum, PostingFeatures.POSITIONS);
+  }
+
+  public UnionDocsAndPositionsEnum(Bits liveDocs, AtomicReaderContext context, Term[] terms,
+                                     Map<Term,TermContext> termContexts, TermsEnum termsEnum, PostingFeatures flags) throws IOException {
+    List<DocsEnum> docsEnums = new LinkedList<DocsEnum>();
     for (int i = 0; i < terms.length; i++) {
       final Term term = terms[i];
       TermState termState = termContexts.get(term).get(context.ord);
@@ -484,7 +573,7 @@
         continue;
       }
       termsEnum.seekExact(term.bytes(), termState);
-      DocsAndPositionsEnum postings = termsEnum.docsAndPositions(liveDocs, null, DocsEnum.FLAG_NONE);
+      DocsEnum postings = termsEnum.docsAndPositions(liveDocs, null, DocsEnum.FLAG_NONE);
       if (postings == null) {
         // term does exist, but has no positions
         throw new IllegalStateException("field \"" + term.field() + "\" was indexed without position data; cannot run PhraseQuery (term=" + term.text() + ")");
@@ -494,7 +583,7 @@
     }
 
     _queue = new DocsQueue(docsEnums);
-    _posList = new IntQueue();
+    _posList = new PositionQueue();
   }
 
   @Override
@@ -510,13 +599,13 @@
     _doc = _queue.top().docID();
 
     // merge sort all positions together
-    DocsAndPositionsEnum postings;
+    DocsEnum postings;
     do {
       postings = _queue.top();
 
       final int freq = postings.freq();
       for (int i = 0; i < freq; i++) {
-        _posList.add(postings.nextPosition());
+        _posList.add(postings.nextPosition(), postings.startOffset(), postings.endOffset());
       }
 
       if (postings.nextDoc() != NO_MORE_DOCS) {
@@ -528,23 +617,27 @@
 
     _posList.sort();
     _freq = _posList.size();
+    posPending = _freq;
 
     return _doc;
   }
 
   @Override
   public int nextPosition() {
+    if (posPending == 0)
+      return NO_MORE_POSITIONS;
+    posPending--;
     return _posList.next();
   }
 
   @Override
   public int startOffset() {
-    return -1;
+    return _posList.startOffset();
   }
 
   @Override
   public int endOffset() {
-    return -1;
+    return _posList.endOffset();
   }
 
   @Override
@@ -555,7 +648,7 @@
   @Override
   public final int advance(int target) throws IOException {
     while (_queue.top() != null && target > _queue.top().docID()) {
-      DocsAndPositionsEnum postings = _queue.pop();
+      DocsEnum postings = _queue.pop();
       if (postings.advance(target) != NO_MORE_DOCS) {
         _queue.add(postings);
       }
@@ -564,7 +657,7 @@
   }
 
   @Override
-  public final int freq() {
+  public final int freq() throws IOException {
     return _freq;
   }
 
diff --git a/lucene/core/src/java/org/apache/lucene/search/PhrasePositions.java b/lucene/core/src/java/org/apache/lucene/search/PhrasePositions.java
index c975b01..7faa453 100644
--- a/lucene/core/src/java/org/apache/lucene/search/PhrasePositions.java
+++ b/lucene/core/src/java/org/apache/lucene/search/PhrasePositions.java
@@ -17,8 +17,10 @@
  * limitations under the License.
  */
 
+import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.Term;
+
 import java.io.IOException;
-import org.apache.lucene.index.*;
 
 /**
  * Position of a term in a document that takes into account the term offset within the phrase. 
@@ -29,13 +31,13 @@
   int count;            // remaining pos in this doc
   int offset;           // position in phrase
   final int ord;                                  // unique across all PhrasePositions instances
-  final DocsAndPositionsEnum postings;            // stream of docs & positions
+  final DocsEnum postings;            // stream of docs & positions
   PhrasePositions next;                           // used to make lists
   int rptGroup = -1; // >=0 indicates that this is a repeating PP
   int rptInd; // index in the rptGroup
   final Term[] terms; // for repetitions initialization 
 
-  PhrasePositions(DocsAndPositionsEnum postings, int o, int ord, Term[] terms) {
+  PhrasePositions(DocsEnum postings, int o, int ord, Term[] terms) {
     this.postings = postings;
     offset = o;
     this.ord = ord;
@@ -44,6 +46,7 @@
 
   final boolean next() throws IOException {  // increments to next doc
     doc = postings.nextDoc();
+    
     if (doc == DocIdSetIterator.NO_MORE_DOCS) {
       return false;
     }
@@ -80,10 +83,14 @@
   /** for debug purposes */
   @Override
   public String toString() {
-    String s = "d:"+doc+" o:"+offset+" p:"+position+" c:"+count;
+    String s = "d:"+doc+" offset:"+offset+" position:"+position+" c:"+count;
     if (rptGroup >=0 ) {
       s += " rpt:"+rptGroup+",i"+rptInd;
     }
+    s += " t: [" + terms[0];
+    for (int i = 1; i < terms.length; i++)
+      s += "," + terms[1];
+    s += "]";
     return s;
   }
 }
diff --git a/lucene/core/src/java/org/apache/lucene/search/PhraseQuery.java b/lucene/core/src/java/org/apache/lucene/search/PhraseQuery.java
index 6a32d51..4738a3c 100644
--- a/lucene/core/src/java/org/apache/lucene/search/PhraseQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/PhraseQuery.java
@@ -1,6 +1,6 @@
 package org.apache.lucene.search;
 
-/*
+/**
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.
@@ -17,28 +17,29 @@
  * limitations under the License.
  */
 
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Set;
-
+import org.apache.lucene.index.AtomicReader;
 import org.apache.lucene.index.AtomicReaderContext;
-import org.apache.lucene.index.DocsAndPositionsEnum;
 import org.apache.lucene.index.DocsEnum;
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.AtomicReader;
 import org.apache.lucene.index.IndexReaderContext;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.TermContext;
 import org.apache.lucene.index.TermState;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
-import org.apache.lucene.search.similarities.Similarity.SimScorer;
+import org.apache.lucene.search.Weight.PostingFeatures;
 import org.apache.lucene.search.similarities.Similarity;
+import org.apache.lucene.search.similarities.Similarity.SimScorer;
 import org.apache.lucene.util.ArrayUtil;
 import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.ToStringUtils;
 
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Set;
+
 /** A Query that matches documents containing a particular sequence of terms.
  * A PhraseQuery is built by QueryParser for input like <code>"new york"</code>.
  * 
@@ -133,13 +134,15 @@
   }
 
   static class PostingsAndFreq implements Comparable<PostingsAndFreq> {
-    final DocsAndPositionsEnum postings;
+    final TermDocsEnumFactory factory;
+    final DocsEnum postings;
     final int docFreq;
     final int position;
     final Term[] terms;
     final int nTerms; // for faster comparisons
 
-    public PostingsAndFreq(DocsAndPositionsEnum postings, int docFreq, int position, Term... terms) {
+    public PostingsAndFreq(DocsEnum postings, TermDocsEnumFactory factory, int docFreq, int position, Term... terms) throws IOException {
+      this.factory = factory;
       this.postings = postings;
       this.docFreq = docFreq;
       this.position = position;
@@ -241,7 +244,7 @@
 
     @Override
     public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder,
-        boolean topScorer, Bits acceptDocs) throws IOException {
+        boolean topScorer, PostingFeatures flags, Bits acceptDocs) throws IOException {
       assert !terms.isEmpty();
       final AtomicReader reader = context.reader();
       final Bits liveDocs = acceptDocs;
@@ -263,7 +266,7 @@
           return null;
         }
         te.seekExact(t.bytes(), state);
-        DocsAndPositionsEnum postingsEnum = te.docsAndPositions(liveDocs, null, DocsEnum.FLAG_NONE);
+        DocsEnum postingsEnum = te.docsAndPositions(liveDocs, null, DocsEnum.FLAG_NONE);
 
         // PhraseQuery on a field that did not index
         // positions.
@@ -272,7 +275,8 @@
           // term does exist, but has no positions
           throw new IllegalStateException("field \"" + t.field() + "\" was indexed without position data; cannot run PhraseQuery (term=" + t.text() + ")");
         }
-        postingsFreqs[i] = new PostingsAndFreq(postingsEnum, te.docFreq(), positions.get(i).intValue(), t);
+        TermDocsEnumFactory factory = new TermDocsEnumFactory(t.bytes(), state, te, flags, acceptDocs);
+        postingsFreqs[i] = new PostingsAndFreq(postingsEnum, factory, te.docFreq(), positions.get(i).intValue(), t);
       }
 
       // sort by increasing docFreq order
@@ -300,7 +304,7 @@
 
     @Override
     public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
-      Scorer scorer = scorer(context, true, false, context.reader().getLiveDocs());
+      Scorer scorer = scorer(context, true, false, PostingFeatures.POSITIONS, context.reader().getLiveDocs());
       if (scorer != null) {
         int newDoc = scorer.advance(doc);
         if (newDoc == doc) {
@@ -398,4 +402,33 @@
       ^ positions.hashCode();
   }
 
+  static class TermDocsEnumFactory {
+    protected final TermsEnum termsEnum;
+    protected final Bits liveDocs;
+    protected final PostingFeatures flags;
+
+    private final BytesRef term;
+    private final TermState termState;
+    
+    TermDocsEnumFactory(TermsEnum termsEnum, PostingFeatures flags, Bits liveDocs) {
+      this(null, null, termsEnum, flags, liveDocs);
+    }
+    
+    TermDocsEnumFactory(BytesRef term, TermState termState, TermsEnum termsEnum, PostingFeatures flags,  Bits liveDocs) {
+      this.termsEnum = termsEnum;
+      this.termState = termState;
+      this.liveDocs = liveDocs;
+      this.term = term;
+      this.flags = flags;
+    }
+    
+    
+    public DocsEnum docsAndPositionsEnum()
+        throws IOException {
+      assert term != null;
+      termsEnum.seekExact(term, termState);
+      return termsEnum.docsAndPositions(liveDocs, null, flags.docsAndPositionsFlags());
+    }
+
+  }
 }
diff --git a/lucene/core/src/java/org/apache/lucene/search/PositionQueue.java b/lucene/core/src/java/org/apache/lucene/search/PositionQueue.java
new file mode 100644
index 0000000..594e366
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/search/PositionQueue.java
@@ -0,0 +1,127 @@
+package org.apache.lucene.search;
+
+import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.search.posfilter.Interval;
+import org.apache.lucene.util.PriorityQueue;
+
+import java.io.IOException;
+
+/**
+ * Copyright (c) 2013 Lemur Consulting Ltd.
+ * <p/>
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+public class PositionQueue extends PriorityQueue<PositionQueue.DocsEnumRef> {
+
+  public class DocsEnumRef {
+
+    public final DocsEnum docsEnum;
+    public final int ord;
+    public Interval interval = new Interval();
+
+    public DocsEnumRef(DocsEnum docsEnum, int ord) {
+      this.docsEnum = docsEnum;
+      this.ord = ord;
+    }
+
+    public int nextPosition() throws IOException {
+      assert docsEnum.docID() != -1;
+      if (docsEnum.docID() == DocsEnum.NO_MORE_DOCS || docsEnum.docID() != docId
+            || docsEnum.nextPosition() == DocsEnum.NO_MORE_POSITIONS)
+        interval.setMaximum();
+      else
+        interval.update(this.docsEnum);
+      return interval.begin;
+    }
+
+  }
+
+  boolean positioned = false;
+  Interval current = new Interval();
+  int docId = -1;
+  protected int queuesize;
+
+  public PositionQueue(DocsEnum... subDocsEnums) {
+    super(subDocsEnums.length);
+    for (int i = 0; i < subDocsEnums.length; i++) {
+      add(new DocsEnumRef(subDocsEnums[i], i));
+    }
+    queuesize = subDocsEnums.length;
+  }
+
+  protected void init() throws IOException {
+    queuesize = 0;
+    for (Object scorerRef : getHeapArray()) {
+      if (scorerRef != null) {
+        ((DocsEnumRef) scorerRef).nextPosition();
+        queuesize++;
+      }
+    }
+    updateTop();
+  }
+
+  public int nextPosition() throws IOException {
+    if (!positioned) {
+      init();
+      positioned = true;
+      current.update(top().interval);
+      return current.begin;
+    };
+    if (current.begin == DocsEnum.NO_MORE_POSITIONS)
+      return DocsEnum.NO_MORE_POSITIONS;
+    if (top().nextPosition() == DocsEnum.NO_MORE_POSITIONS)
+      queuesize--;
+    updateInternalIntervals();
+    updateTop();
+    current.update(top().interval);
+    //System.out.println("PQ: " + current.toString());
+    return current.begin;
+  }
+
+  @Override
+  protected boolean lessThan(DocsEnumRef a, DocsEnumRef b) {
+    if (a.docsEnum.docID() < b.docsEnum.docID())
+      return true;
+    if (a.docsEnum.docID() > b.docsEnum.docID())
+      return false;
+    return a.interval.begin < b.interval.begin;
+  }
+
+  protected void updateInternalIntervals() {}
+
+  /**
+   * Must be called after the scorers have been advanced
+   */
+  public void advanceTo(int doc) {
+    positioned = false;
+    this.docId = doc;
+    this.queuesize = this.size();
+  }
+
+  public int startPosition() throws IOException {
+    return current.begin;
+  }
+
+  public int endPosition() throws IOException {
+    return current.end;
+  }
+
+  public int startOffset() throws IOException {
+    return current.offsetBegin;
+  }
+
+  public int endOffset() throws IOException {
+    return current.offsetEnd;
+  }
+
+}
diff --git a/lucene/core/src/java/org/apache/lucene/search/PositionsCollector.java b/lucene/core/src/java/org/apache/lucene/search/PositionsCollector.java
new file mode 100644
index 0000000..43e18bb
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/search/PositionsCollector.java
@@ -0,0 +1,88 @@
+package org.apache.lucene.search;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.search.posfilter.Interval;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class PositionsCollector extends Collector {
+
+  private Scorer scorer;
+  private int docsSeen = 0;
+
+  private final int numDocs;
+  private DocPositions[] positions;
+
+  public PositionsCollector(int numDocs) {
+    this.numDocs = numDocs;
+    this.positions = new DocPositions[this.numDocs];
+  }
+
+  @Override
+  public void setScorer(Scorer scorer) throws IOException {
+    this.scorer = scorer;
+  }
+
+  @Override
+  public void collect(int doc) throws IOException {
+    if (docsSeen >= numDocs)
+      return;
+    DocPositions dp = new DocPositions(doc);
+    while (scorer.nextPosition() != DocsEnum.NO_MORE_POSITIONS) {
+      dp.positions.add(new Interval(scorer));
+    }
+    positions[docsSeen] = dp;
+    docsSeen++;
+  }
+
+  @Override
+  public void setNextReader(AtomicReaderContext context) throws IOException {
+
+  }
+
+  @Override
+  public boolean acceptsDocsOutOfOrder() {
+    return true;
+  }
+
+  public DocPositions[] getPositions() {
+    return positions;
+  }
+
+  public int getNumDocs() {
+    return docsSeen;
+  }
+
+  public static class DocPositions {
+
+    public final int doc;
+    public final List<Interval> positions;
+
+    DocPositions(int doc) {
+      this.doc = doc;
+      this.positions = new ArrayList<Interval>();
+    }
+
+  }
+
+}
diff --git a/lucene/core/src/java/org/apache/lucene/search/QueryWrapperFilter.java b/lucene/core/src/java/org/apache/lucene/search/QueryWrapperFilter.java
index 823b47b..c4d9e06 100644
--- a/lucene/core/src/java/org/apache/lucene/search/QueryWrapperFilter.java
+++ b/lucene/core/src/java/org/apache/lucene/search/QueryWrapperFilter.java
@@ -20,6 +20,7 @@
 import java.io.IOException;
 
 import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.Weight.PostingFeatures;
 import org.apache.lucene.util.Bits;
 
 /** 
@@ -56,7 +57,7 @@
     return new DocIdSet() {
       @Override
       public DocIdSetIterator iterator() throws IOException {
-        return weight.scorer(privateContext, true, false, acceptDocs);
+        return weight.scorer(privateContext, true, false, PostingFeatures.DOCS_AND_FREQS, acceptDocs);
       }
       @Override
       public boolean isCacheable() { return false; }
diff --git a/lucene/core/src/java/org/apache/lucene/search/ReqExclScorer.java b/lucene/core/src/java/org/apache/lucene/search/ReqExclScorer.java
index 14a3cf2..b7680db 100644
--- a/lucene/core/src/java/org/apache/lucene/search/ReqExclScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/ReqExclScorer.java
@@ -17,6 +17,9 @@
  * limitations under the License.
  */
 
+import org.apache.lucene.search.intervals.ConjunctionIntervalIterator;
+import org.apache.lucene.search.intervals.IntervalIterator;
+
 import java.io.IOException;
 import java.util.Collection;
 import java.util.Collections;
@@ -110,6 +113,31 @@
   }
 
   @Override
+  public int nextPosition() throws IOException {
+    return reqScorer.nextPosition();
+  }
+
+  @Override
+  public int startPosition() throws IOException {
+    return reqScorer.startPosition();
+  }
+
+  @Override
+  public int endPosition() throws IOException {
+    return reqScorer.endPosition();
+  }
+
+  @Override
+  public int startOffset() throws IOException {
+    return reqScorer.startOffset();
+  }
+
+  @Override
+  public int endOffset() throws IOException {
+    return reqScorer.endOffset();
+  }
+
+  @Override
   public Collection<ChildScorer> getChildren() {
     return Collections.singleton(new ChildScorer(reqScorer, "FILTERED"));
   }
@@ -130,6 +158,13 @@
   }
 
   @Override
+  public IntervalIterator intervals(boolean collectIntervals) throws IOException {
+    if (reqScorer == null)
+      return IntervalIterator.NO_MORE_INTERVALS;
+    return new ConjunctionIntervalIterator(this, collectIntervals, reqScorer.intervals(collectIntervals));
+  }
+
+  @Override
   public long cost() {
     return reqScorer.cost();
   }
diff --git a/lucene/core/src/java/org/apache/lucene/search/ReqOptSumScorer.java b/lucene/core/src/java/org/apache/lucene/search/ReqOptSumScorer.java
index 4e1c40c..c341139 100644
--- a/lucene/core/src/java/org/apache/lucene/search/ReqOptSumScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/ReqOptSumScorer.java
@@ -16,6 +16,9 @@
  * limitations under the License.
  */
 
+import org.apache.lucene.search.intervals.DisjunctionIntervalIterator;
+import org.apache.lucene.search.intervals.IntervalIterator;
+
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
@@ -31,6 +34,7 @@
    */
   private Scorer reqScorer;
   private Scorer optScorer;
+  private PositionQueue posQueue;
 
   /** Construct a <code>ReqOptScorer</code>.
    * @param reqScorer The required scorer. This must match.
@@ -45,16 +49,21 @@
     assert optScorer != null;
     this.reqScorer = reqScorer;
     this.optScorer = optScorer;
+    posQueue = new PositionQueue(reqScorer, optScorer);
   }
 
   @Override
   public int nextDoc() throws IOException {
-    return reqScorer.nextDoc();
+    int doc = reqScorer.nextDoc();
+    posQueue.advanceTo(doc);
+    return doc;
   }
   
   @Override
   public int advance(int target) throws IOException {
-    return reqScorer.advance(target);
+    int doc = reqScorer.advance(target);
+    posQueue.advanceTo(doc);
+    return doc;
   }
   
   @Override
@@ -86,6 +95,11 @@
   }
 
   @Override
+  public IntervalIterator intervals(boolean collectIntervals) throws IOException {
+    return new DisjunctionIntervalIterator(this, collectIntervals, pullIterators(collectIntervals, reqScorer, optScorer));
+  }
+
+  @Override
   public int freq() throws IOException {
     // we might have deferred advance()
     score();
@@ -93,6 +107,34 @@
   }
 
   @Override
+  public int nextPosition() throws IOException {
+    int optDoc = optScorer.docID();
+    if (optDoc < reqScorer.docID())
+      optScorer.advance(reqScorer.docID());
+    return posQueue.nextPosition();
+  }
+
+  @Override
+  public int startPosition() throws IOException {
+    return posQueue.startPosition();
+  }
+
+  @Override
+  public int endPosition() throws IOException {
+    return posQueue.endPosition();
+  }
+
+  @Override
+  public int startOffset() throws IOException {
+    return posQueue.startOffset();
+  }
+
+  @Override
+  public int endOffset() throws IOException {
+    return posQueue.endOffset();
+  }
+
+  @Override
   public Collection<ChildScorer> getChildren() {
     ArrayList<ChildScorer> children = new ArrayList<ChildScorer>(2);
     children.add(new ChildScorer(reqScorer, "MUST"));
diff --git a/lucene/core/src/java/org/apache/lucene/search/ScoreCachingWrappingScorer.java b/lucene/core/src/java/org/apache/lucene/search/ScoreCachingWrappingScorer.java
index f850f53..80721e6 100644
--- a/lucene/core/src/java/org/apache/lucene/search/ScoreCachingWrappingScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/ScoreCachingWrappingScorer.java
@@ -17,6 +17,8 @@
  * limitations under the License.
  */
 
+import org.apache.lucene.search.intervals.IntervalIterator;
+
 import java.io.IOException;
 import java.util.Collection;
 import java.util.Collections;
@@ -86,6 +88,11 @@
   }
 
   @Override
+  public IntervalIterator intervals(boolean collectIntervals) throws IOException {
+    return scorer.intervals(collectIntervals);
+  }
+
+  @Override
   public Collection<ChildScorer> getChildren() {
     return Collections.singleton(new ChildScorer(scorer, "CACHED"));
   }
diff --git a/lucene/core/src/java/org/apache/lucene/search/Scorer.java b/lucene/core/src/java/org/apache/lucene/search/Scorer.java
index 47fef12..10bc3d9 100644
--- a/lucene/core/src/java/org/apache/lucene/search/Scorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/Scorer.java
@@ -17,12 +17,13 @@
  * limitations under the License.
  */
 
+import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.search.intervals.IntervalIterator;
+
 import java.io.IOException;
 import java.util.Collection;
 import java.util.Collections;
 
-import org.apache.lucene.index.DocsEnum;
-
 /**
  * Expert: Common scoring functionality for different types of queries.
  *
@@ -65,6 +66,52 @@
       collector.collect(doc);
     }
   }
+  
+  /**
+   * Expert: Retrieves an {@link IntervalIterator} for this scorer allowing
+   * access to position and offset intervals for each
+   * matching document.  Call this up-front and use it as
+   * long as you are still using this scorer.  The
+   * returned iterator is bound to scorer that created it;
+   * after {@link #nextDoc} or {@link #advance} you must
+   * call {@link IntervalIterator#scorerAdvanced} before
+   * iterating over that document's intervals.
+   * 
+   * @param collectIntervals
+   *          if <code>true</code> the {@link IntervalIterator} can be used to
+   *          collect all individual sub-intervals this {@link IntervalIterator}
+   *          is composed of via
+   *          {@link IntervalIterator#collect(org.apache.lucene.search.intervals.IntervalCollector)}
+   * @return an {@link IntervalIterator} over matching intervals
+   * @throws IOException
+   *           if a low-level I/O error is encountered
+   *
+   * @lucene.experimental
+   */
+  public IntervalIterator intervals(boolean collectIntervals) throws IOException {
+    return null;
+  };
+
+  /**
+   * Get the IntervalIterators from a list of scorers
+   * @param collectIntervals true if positions will be collected
+   * @param scorers the list of scorers to retrieve IntervalIterators from
+   * @return a list of IntervalIterators pulled from the passed in Scorers
+   * @throws java.io.IOException if a low-evel I/O error is encountered
+   */
+  public static IntervalIterator[] pullIterators(boolean collectIntervals, Scorer... scorers)
+      throws IOException {
+    IntervalIterator[] iterators = new IntervalIterator[scorers.length];
+    for (int i = 0; i < scorers.length; i++) {
+      if (scorers[i] == null) {
+        iterators[i] = IntervalIterator.NO_MORE_INTERVALS;
+      }
+      else {
+        iterators[i] = scorers[i].intervals(collectIntervals);
+      }
+    }
+    return iterators;
+  }
 
   /**
    * Expert: Collects matching documents in a range. Hook for optimization.
@@ -103,6 +150,20 @@
   public Weight getWeight() {
     return weight;
   }
+
+  @Override
+  public int nextPosition() throws IOException {
+    throw new UnsupportedOperationException("nextPosition() is not implemented on " + this.getClass());
+  }
+
+  @Override
+  public String toString() {
+    try {
+      return String.format("%d:%d(%d)->%d(%d)", docID(), startPosition(), startOffset(), endPosition(), endOffset());
+    } catch (IOException e) {
+      return String.format("Cannot retrieve position due to IOException");
+    }
+  }
   
   /** Returns child sub-scorers
    * @lucene.experimental */
diff --git a/lucene/core/src/java/org/apache/lucene/search/SloppyPhraseScorer.java b/lucene/core/src/java/org/apache/lucene/search/SloppyPhraseScorer.java
index 0667d8b..f46bc5f 100644
--- a/lucene/core/src/java/org/apache/lucene/search/SloppyPhraseScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/SloppyPhraseScorer.java
@@ -17,24 +17,34 @@
  * limitations under the License.
  */
 
+import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.posfilter.Interval;
+import org.apache.lucene.search.intervals.IntervalCollector;
+import org.apache.lucene.search.intervals.IntervalIterator;
+import org.apache.lucene.search.intervals.SloppyIntervalIterator;
+import org.apache.lucene.search.intervals.TermIntervalIterator;
+import org.apache.lucene.search.similarities.Similarity;
+import org.apache.lucene.util.OpenBitSet;
+
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collection;
 import java.util.Comparator;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
 
-import org.apache.lucene.index.Term;
-import org.apache.lucene.search.similarities.Similarity;
-import org.apache.lucene.util.OpenBitSet;
-
-final class SloppyPhraseScorer extends Scorer {
+final class  SloppyPhraseScorer extends Scorer {
   private PhrasePositions min, max;
 
   private float sloppyFreq; //phrase frequency in current doc as computed by phraseFreq().
 
   private final Similarity.SimScorer docScorer;
+  private final PhraseQuery.PostingsAndFreq[] postings;
   
   private final int slop;
   private final int numPostings;
@@ -55,6 +65,7 @@
       int slop, Similarity.SimScorer docScorer) {
     super(weight);
     this.docScorer = docScorer;
+    this.postings = postings;
     this.slop = slop;
     this.numPostings = postings==null ? 0 : postings.length;
     pq = new PhraseQueue(postings.length);
@@ -597,4 +608,96 @@
 
   @Override
   public String toString() { return "scorer(" + weight + ")"; }
+
+  @Override
+  public IntervalIterator intervals(boolean collectIntervals) throws IOException {
+    Map<Term, IterAndOffsets> map = new HashMap<Term, IterAndOffsets>();
+    List<DocsEnum> enums = new ArrayList<DocsEnum>();
+
+    for (int i = 0; i < postings.length; i++) {
+      if (postings[i].terms.length > 1) {
+        throw new UnsupportedOperationException("IntervalIterators for MulitPhraseQuery is not supported");
+      }
+      Term term = postings[i].terms[0];
+      IterAndOffsets iterAndOffset;
+      if (!map.containsKey(term)) {
+        DocsEnum docsAndPosEnum = postings[i].factory
+            .docsAndPositionsEnum();
+        enums.add(docsAndPosEnum);
+        iterAndOffset = new IterAndOffsets(new TermIntervalIterator(this, docsAndPosEnum, false,
+            collectIntervals));
+        map.put(term, iterAndOffset);
+      } else {
+        iterAndOffset = map.get(term);
+      }
+      iterAndOffset.offsets.add(postings[i].position);
+    }
+    Collection<IterAndOffsets> values = map.values();
+    IntervalIterator[] iters = new IntervalIterator[values.size()];
+    int i = 0;
+    for (IterAndOffsets iterAndOffsets : values) {
+      iters[i++] = SloppyIntervalIterator.create(this, collectIntervals, iterAndOffsets.iter, iterAndOffsets.toIntArray());
+    }
+    return new AdvancingIntervalIterator(this, collectIntervals, enums.toArray(new DocsEnum[enums.size()]), new SloppyIntervalIterator(this, slop, collectIntervals, iters));
+  }
+
+  private final static class IterAndOffsets {
+    final List<Integer> offsets = new ArrayList<Integer>();
+    final IntervalIterator iter;
+
+    IterAndOffsets(IntervalIterator iter) {
+      this.iter = iter;
+    }
+
+    int[] toIntArray() {
+      int[] array = new int[offsets.size()];
+      for (int i = 0; i < array.length; i++) {
+        array[i] = offsets.get(i).intValue();
+      }
+      return array;
+    }
+  }
+
+  final static class AdvancingIntervalIterator extends IntervalIterator {
+
+    public AdvancingIntervalIterator(Scorer scorer, boolean collectIntervals, final DocsEnum[] enums, final IntervalIterator delegate) {
+      super(scorer, collectIntervals);
+      this.enums = enums;
+      this.delegate = delegate;
+    }
+
+    private final DocsEnum[] enums;
+    private final IntervalIterator delegate;
+    @Override
+    public int scorerAdvanced(int docId) throws IOException {
+      assert docId == docID();
+      for (DocsEnum oneEnum : enums) {
+        int advance = oneEnum.advance(docId);
+        assert advance == docId;
+      }
+      delegate.scorerAdvanced(docId);
+      return docId;
+    }
+
+    @Override
+    public Interval next() throws IOException {
+      return delegate.next();
+    }
+
+    @Override
+    public void collect(IntervalCollector collector) {
+      delegate.collect(collector);
+    }
+
+    @Override
+    public IntervalIterator[] subs(boolean inOrder) {
+      return delegate.subs(inOrder);
+    }
+
+    @Override
+    public int matchDistance() {
+      return delegate.matchDistance();
+    }
+
+  }
 }
diff --git a/lucene/core/src/java/org/apache/lucene/search/TermQuery.java b/lucene/core/src/java/org/apache/lucene/search/TermQuery.java
index b6a1f23..0930f5c 100644
--- a/lucene/core/src/java/org/apache/lucene/search/TermQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/TermQuery.java
@@ -17,72 +17,85 @@
  * limitations under the License.
  */
 
-import java.io.IOException;
-import java.util.Set;
-
+import org.apache.lucene.index.AtomicReader;
 import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.DocsEnum;
-import org.apache.lucene.index.AtomicReader;
 import org.apache.lucene.index.IndexReaderContext;
 import org.apache.lucene.index.ReaderUtil;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.TermContext;
 import org.apache.lucene.index.TermState;
 import org.apache.lucene.index.TermsEnum;
-import org.apache.lucene.search.similarities.Similarity.SimScorer;
 import org.apache.lucene.search.similarities.Similarity;
+import org.apache.lucene.search.similarities.Similarity.SimScorer;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.ToStringUtils;
 
-/** A Query that matches documents containing a term.
-  This may be combined with other terms with a {@link BooleanQuery}.
-  */
+import java.io.IOException;
+import java.util.Set;
+
+/**
+ * A Query that matches documents containing a term. This may be combined with
+ * other terms with a {@link BooleanQuery}.
+ */
 public class TermQuery extends Query {
   private final Term term;
   private final int docFreq;
   private final TermContext perReaderTermState;
-
+  
   final class TermWeight extends Weight {
     private final Similarity similarity;
     private final Similarity.SimWeight stats;
     private final TermContext termStates;
-
+    
     public TermWeight(IndexSearcher searcher, TermContext termStates)
-      throws IOException {
+        throws IOException {
       assert termStates != null : "TermContext must not be null";
       this.termStates = termStates;
       this.similarity = searcher.getSimilarity();
-      this.stats = similarity.computeWeight(
-          getBoost(), 
-          searcher.collectionStatistics(term.field()), 
+      this.stats = similarity.computeWeight(getBoost(),
+          searcher.collectionStatistics(term.field()),
           searcher.termStatistics(term, termStates));
     }
-
+    
     @Override
-    public String toString() { return "weight(" + TermQuery.this + ")"; }
-
+    public String toString() {
+      return "weight(" + TermQuery.this + ")";
+    }
+    
     @Override
-    public Query getQuery() { return TermQuery.this; }
-
+    public Query getQuery() {
+      return TermQuery.this;
+    }
+    
     @Override
     public float getValueForNormalization() {
       return stats.getValueForNormalization();
     }
-
+    
     @Override
     public void normalize(float queryNorm, float topLevelBoost) {
       stats.normalize(queryNorm, topLevelBoost);
     }
-
+    
     @Override
     public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder,
-        boolean topScorer, Bits acceptDocs) throws IOException {
-      assert termStates.topReaderContext == ReaderUtil.getTopLevelContext(context) : "The top-reader used to create Weight (" + termStates.topReaderContext + ") is not the same as the current reader's top-reader (" + ReaderUtil.getTopLevelContext(context);
+        boolean topScorer, PostingFeatures flags, Bits acceptDocs) throws IOException {
+      assert termStates.topReaderContext == ReaderUtil
+          .getTopLevelContext(context) : "The top-reader used to create Weight ("
+          + termStates.topReaderContext
+          + ") is not the same as the current reader's top-reader ("
+          + ReaderUtil.getTopLevelContext(context);
       final TermsEnum termsEnum = getTermsEnum(context);
       if (termsEnum == null) {
         return null;
       }
-      DocsEnum docs = termsEnum.docs(acceptDocs, null);
+      DocsEnum docs;
+      if (flags.compareTo(PostingFeatures.POSITIONS) < 0) {
+        docs = termsEnum.docs(acceptDocs, null, flags.docFlags());
+      } else {
+        docs =  termsEnum.docsAndPositions(acceptDocs, null, flags.docsAndPositionsFlags());
+      }
       assert docs != null;
       return new TermScorer(this, docs, similarity.simScorer(stats, context));
     }
@@ -97,90 +110,102 @@
         assert termNotInReader(context.reader(), term) : "no termstate found but term exists in reader term=" + term;
         return null;
       }
-      //System.out.println("LD=" + reader.getLiveDocs() + " set?=" + (reader.getLiveDocs() != null ? reader.getLiveDocs().get(0) : "null"));
-      final TermsEnum termsEnum = context.reader().terms(term.field()).iterator(null);
+      // System.out.println("LD=" + reader.getLiveDocs() + " set?=" +
+      // (reader.getLiveDocs() != null ? reader.getLiveDocs().get(0) : "null"));
+      final TermsEnum termsEnum = context.reader().terms(term.field())
+          .iterator(null);
       termsEnum.seekExact(term.bytes(), state);
       return termsEnum;
     }
     
     private boolean termNotInReader(AtomicReader reader, Term term) throws IOException {
       // only called from assert
-      //System.out.println("TQ.termNotInReader reader=" + reader + " term=" + field + ":" + bytes.utf8ToString());
+      // System.out.println("TQ.termNotInReader reader=" + reader + " term=" +
+      // field + ":" + bytes.utf8ToString());
       return reader.docFreq(term) == 0;
     }
     
     @Override
-    public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
-      Scorer scorer = scorer(context, true, false, context.reader().getLiveDocs());
+    public Explanation explain(AtomicReaderContext context, int doc)
+        throws IOException {
+      Scorer scorer = scorer(context, true, false, PostingFeatures.DOCS_AND_FREQS, context.reader()
+              .getLiveDocs());
       if (scorer != null) {
         int newDoc = scorer.advance(doc);
         if (newDoc == doc) {
           float freq = scorer.freq();
           SimScorer docScorer = similarity.simScorer(stats, context);
           ComplexExplanation result = new ComplexExplanation();
-          result.setDescription("weight("+getQuery()+" in "+doc+") [" + similarity.getClass().getSimpleName() + "], result of:");
-          Explanation scoreExplanation = docScorer.explain(doc, new Explanation(freq, "termFreq=" + freq));
+          result.setDescription("weight(" + getQuery() + " in " + doc + ") ["
+              + similarity.getClass().getSimpleName() + "], result of:");
+          Explanation scoreExplanation = docScorer.explain(doc,
+              new Explanation(freq, "termFreq=" + freq));
           result.addDetail(scoreExplanation);
           result.setValue(scoreExplanation.getValue());
           result.setMatch(true);
           return result;
         }
       }
-      return new ComplexExplanation(false, 0.0f, "no matching term");      
+      return new ComplexExplanation(false, 0.0f, "no matching term");
     }
   }
-
+  
   /** Constructs a query for the term <code>t</code>. */
   public TermQuery(Term t) {
     this(t, -1);
   }
-
-  /** Expert: constructs a TermQuery that will use the
-   *  provided docFreq instead of looking up the docFreq
-   *  against the searcher. */
+  
+  /**
+   * Expert: constructs a TermQuery that will use the provided docFreq instead
+   * of looking up the docFreq against the searcher.
+   */
   public TermQuery(Term t, int docFreq) {
     term = t;
     this.docFreq = docFreq;
     perReaderTermState = null;
   }
   
-  /** Expert: constructs a TermQuery that will use the
-   *  provided docFreq instead of looking up the docFreq
-   *  against the searcher. */
+  /**
+   * Expert: constructs a TermQuery that will use the provided docFreq instead
+   * of looking up the docFreq against the searcher.
+   */
   public TermQuery(Term t, TermContext states) {
     assert states != null;
     term = t;
     docFreq = states.docFreq();
     perReaderTermState = states;
   }
-
+  
   /** Returns the term of this query. */
-  public Term getTerm() { return term; }
-
+  public Term getTerm() {
+    return term;
+  }
+  
   @Override
   public Weight createWeight(IndexSearcher searcher) throws IOException {
     final IndexReaderContext context = searcher.getTopReaderContext();
     final TermContext termState;
-    if (perReaderTermState == null || perReaderTermState.topReaderContext != context) {
-      // make TermQuery single-pass if we don't have a PRTS or if the context differs!
+    if (perReaderTermState == null
+        || perReaderTermState.topReaderContext != context) {
+      // make TermQuery single-pass if we don't have a PRTS or if the context
+      // differs!
       termState = TermContext.build(context, term);
     } else {
-     // PRTS was pre-build for this IS
-     termState = this.perReaderTermState;
+      // PRTS was pre-build for this IS
+      termState = this.perReaderTermState;
     }
-
+    
     // we must not ignore the given docFreq - if set use the given value (lie)
-    if (docFreq != -1)
-      termState.setDocFreq(docFreq);
+    if (docFreq != -1) termState.setDocFreq(docFreq);
     
     return new TermWeight(searcher, termState);
   }
-
+  
   @Override
   public void extractTerms(Set<Term> terms) {
     terms.add(getTerm());
   }
-
+  
   /** Prints a user-readable version of this query. */
   @Override
   public String toString(String field) {
@@ -193,21 +218,20 @@
     buffer.append(ToStringUtils.boost(getBoost()));
     return buffer.toString();
   }
-
+  
   /** Returns true iff <code>o</code> is equal to this. */
   @Override
   public boolean equals(Object o) {
-    if (!(o instanceof TermQuery))
-      return false;
-    TermQuery other = (TermQuery)o;
+    if (!(o instanceof TermQuery)) return false;
+    TermQuery other = (TermQuery) o;
     return (this.getBoost() == other.getBoost())
-      && this.term.equals(other.term);
+        && this.term.equals(other.term);
   }
-
-  /** Returns a hash code value for this object.*/
+  
+  /** Returns a hash code value for this object. */
   @Override
   public int hashCode() {
     return Float.floatToIntBits(getBoost()) ^ term.hashCode();
   }
-
+  
 }
diff --git a/lucene/core/src/java/org/apache/lucene/search/TermScorer.java b/lucene/core/src/java/org/apache/lucene/search/TermScorer.java
index 6697524..dafea96 100644
--- a/lucene/core/src/java/org/apache/lucene/search/TermScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/TermScorer.java
@@ -17,10 +17,13 @@
  * limitations under the License.
  */
 
-import java.io.IOException;
-
 import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.search.intervals.IntervalIterator;
+import org.apache.lucene.search.intervals.TermIntervalIterator;
 import org.apache.lucene.search.similarities.Similarity;
+import org.apache.lucene.util.BytesRef;
+
+import java.io.IOException;
 
 /** Expert: A <code>Scorer</code> for documents matching a <code>Term</code>.
  */
@@ -64,6 +67,36 @@
   public int nextDoc() throws IOException {
     return docsEnum.nextDoc();
   }
+
+  @Override
+  public int nextPosition() throws IOException {
+    return docsEnum.nextPosition();
+  }
+
+  @Override
+  public int startPosition() throws IOException {
+    return docsEnum.startPosition();
+  }
+
+  @Override
+  public int endPosition() throws IOException {
+    return docsEnum.endPosition();
+  }
+
+  @Override
+  public int startOffset() throws IOException {
+    return docsEnum.startOffset();
+  }
+
+  @Override
+  public int endOffset() throws IOException {
+    return docsEnum.endOffset();
+  }
+
+  @Override
+  public BytesRef getPayload() throws IOException {
+    return docsEnum.getPayload();
+  }
   
   @Override
   public float score() throws IOException {
@@ -92,5 +125,20 @@
 
   /** Returns a string representation of this <code>TermScorer</code>. */
   @Override
-  public String toString() { return "scorer(" + weight + ")"; }
+  public String toString() {
+    return "scorer(" + weight + ")[" + super.toString() + "]";
+  }
+  
+  @Override
+  public IntervalIterator intervals(boolean collectIntervals) throws IOException {
+    return new TermIntervalIterator(this, docsEnum, false, collectIntervals);
+  }
+  // TODO: benchmark if the specialized conjunction really benefits
+  // from this, or if instead its from sorting by docFreq, or both
+
+  DocsEnum getDocsEnum() {
+    return docsEnum;
+  }
+
+
 }
diff --git a/lucene/core/src/java/org/apache/lucene/search/TotalHitCountCollector.java b/lucene/core/src/java/org/apache/lucene/search/TotalHitCountCollector.java
index 1704d8b..a11ba25 100644
--- a/lucene/core/src/java/org/apache/lucene/search/TotalHitCountCollector.java
+++ b/lucene/core/src/java/org/apache/lucene/search/TotalHitCountCollector.java
@@ -18,6 +18,7 @@
  */
 
 import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.Weight.PostingFeatures;
 
 /**
  * Just counts the total number of hits.
@@ -41,6 +42,12 @@
   }
 
   @Override
+  public PostingFeatures postingFeatures() {
+    // we don't need frequencies here
+    return PostingFeatures.DOCS_ONLY;
+  }
+
+  @Override
   public void setNextReader(AtomicReaderContext context) {
   }
 
diff --git a/lucene/core/src/java/org/apache/lucene/search/Weight.java b/lucene/core/src/java/org/apache/lucene/search/Weight.java
index 48dd209..aa031890 100644
--- a/lucene/core/src/java/org/apache/lucene/search/Weight.java
+++ b/lucene/core/src/java/org/apache/lucene/search/Weight.java
@@ -17,14 +17,16 @@
  * limitations under the License.
  */
 
-import java.io.IOException;
-
-import org.apache.lucene.index.AtomicReader; // javadocs
+import org.apache.lucene.index.AtomicReader;
 import org.apache.lucene.index.AtomicReaderContext;
-import org.apache.lucene.index.IndexReaderContext; // javadocs
+import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.IndexReaderContext;
+import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.search.similarities.Similarity;
 import org.apache.lucene.util.Bits;
 
+import java.io.IOException;
+
 /**
  * Expert: Calculate query weights and build query scorers.
  * <p>
@@ -36,7 +38,7 @@
  * <p>
  * Since {@link Weight} creates {@link Scorer} instances for a given
  * {@link AtomicReaderContext} ({@link #scorer(AtomicReaderContext, 
- * boolean, boolean, Bits)})
+ * boolean, boolean, PostingFeatures, Bits)})
  * callers must maintain the relationship between the searcher's top-level
  * {@link IndexReaderContext} and the context used to create a {@link Scorer}. 
  * <p>
@@ -51,7 +53,7 @@
  * <li>The query normalization factor is passed to {@link #normalize(float, float)}. At
  * this point the weighting is complete.
  * <li>A <code>Scorer</code> is constructed by
- * {@link #scorer(AtomicReaderContext, boolean, boolean, Bits)}.
+ * {@link #scorer(AtomicReaderContext, boolean, boolean, PostingFeatures, Bits)}.
  * </ol>
  * 
  * @since 2.9
@@ -103,21 +105,21 @@
    *          if true, {@link Scorer#score(Collector)} will be called; if false,
    *          {@link Scorer#nextDoc()} and/or {@link Scorer#advance(int)} will
    *          be called.
+   * @param flags the low level {@link PostingFeatures} for this scorer.
    * @param acceptDocs
    *          Bits that represent the allowable docs to match (typically deleted docs
    *          but possibly filtering other documents)
-   *          
    * @return a {@link Scorer} which scores documents in/out-of order.
    * @throws IOException if there is a low-level I/O error
    */
   public abstract Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder,
-      boolean topScorer, Bits acceptDocs) throws IOException;
+      boolean topScorer, PostingFeatures flags, Bits acceptDocs) throws IOException;
 
   /**
    * Returns true iff this implementation scores docs only out of order. This
    * method is used in conjunction with {@link Collector}'s
    * {@link Collector#acceptsDocsOutOfOrder() acceptsDocsOutOfOrder} and
-   * {@link #scorer(AtomicReaderContext, boolean, boolean, Bits)} to
+   * {@link #scorer(AtomicReaderContext, boolean, boolean, PostingFeatures, Bits)} to
    * create a matching {@link Scorer} instance for a given {@link Collector}, or
    * vice versa.
    * <p>
@@ -125,4 +127,55 @@
    * the <code>Scorer</code> scores documents in-order.
    */
   public boolean scoresDocsOutOfOrder() { return false; }
+  
+  /**
+   * Feature flags used to control low-level posting list features. These flags
+   * all Collectors and scorers to specify their requirements for document
+   * collection and scoring ahead of time for best performance.
+   */
+  public static enum PostingFeatures {
+    /**Only document IDs are required for document collection and scoring*/
+    DOCS_ONLY(0, 0), 
+    /**Document IDs and Term Frequencies are required for document collection and scoring*/
+    DOCS_AND_FREQS(DocsEnum.FLAG_FREQS, 0),
+    /**Document IDs, Term Frequencies and Positions are required for document collection and scoring*/
+    POSITIONS(DocsEnum.FLAG_FREQS, 0),
+    /**Document IDs, Term Frequencies, Positions and Payloads are required for document collection and scoring*/
+    POSITIONS_AND_PAYLOADS(DocsEnum.FLAG_FREQS, DocsEnum.FLAG_PAYLOADS),
+    /**Document IDs, Term Frequencies, Positions and Offsets are required for document collection and scoring*/
+    OFFSETS(DocsEnum.FLAG_FREQS, DocsEnum.FLAG_OFFSETS),
+    /**Document IDs, Term Frequencies, Positions, Offsets and Payloads are required for document collection and scoring*/
+    OFFSETS_AND_PAYLOADS(DocsEnum.FLAG_FREQS, DocsEnum.FLAG_OFFSETS
+            | DocsEnum.FLAG_PAYLOADS);
+    
+    private final int docsAndPositionsFlags;
+    private final int docFlags;
+    
+    private PostingFeatures(int docFlags, int docsAndPositionsFlags) {
+      this.docsAndPositionsFlags = docsAndPositionsFlags;
+      this.docFlags = docFlags;
+    }
+    
+    /**
+     * Returns the flags for {@link DocsEnum}. This value should be
+     * passed to
+     * {@link TermsEnum#docsAndPositions(Bits, DocsEnum, int)}
+     * 
+     * @return {@link DocsEnum} flags
+     */
+    public int docsAndPositionsFlags() {
+      return docsAndPositionsFlags;
+    }
+    
+    /**
+     * Returns the flags for {@link DocsEnum}. This value should be
+     * passed to
+     * {@link TermsEnum#docs(Bits, DocsEnum, int)}
+     * 
+     * @return {@link DocsEnum} flags
+     */
+    public int docFlags() {
+      return docFlags;
+    }
+  }
 }
diff --git a/lucene/core/src/java/org/apache/lucene/search/intervals/BlockIntervalFilter.java b/lucene/core/src/java/org/apache/lucene/search/intervals/BlockIntervalFilter.java
new file mode 100644
index 0000000..6b87f16
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/search/intervals/BlockIntervalFilter.java
@@ -0,0 +1,36 @@
+package org.apache.lucene.search.intervals;
+
+/**
+ * Copyright (c) 2012 Lemur Consulting Ltd.
+ * <p/>
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class BlockIntervalFilter implements IntervalFilter {
+
+  private final boolean collectLeaves;
+
+  public BlockIntervalFilter() {
+    this(true);
+  }
+
+  public BlockIntervalFilter(boolean collectLeaves) {
+    this.collectLeaves = collectLeaves;
+  }
+
+  @Override
+  public IntervalIterator filter(boolean collectIntervals, IntervalIterator iter) {
+    return new BlockIntervalIterator(collectIntervals, collectLeaves, iter);
+  }
+
+}
diff --git a/lucene/core/src/java/org/apache/lucene/search/intervals/BlockIntervalIterator.java b/lucene/core/src/java/org/apache/lucene/search/intervals/BlockIntervalIterator.java
new file mode 100644
index 0000000..cb48bea
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/search/intervals/BlockIntervalIterator.java
@@ -0,0 +1,182 @@
+package org.apache.lucene.search.intervals;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.posfilter.Interval;
+
+import java.io.IOException;
+import java.util.Arrays;
+
+/**
+ * An IntervalIterator implementing minimum interval semantics for the
+ * BLOCK operator
+ *
+ * See <a href=
+ * "http://vigna.dsi.unimi.it/ftp/papers/EfficientAlgorithmsMinimalIntervalSemantics"
+ * >"Efficient Optimally Lazy Algorithms for Minimal-Interval Semantics"</a>
+ *
+ * @lucene.experimental
+ */
+public final class BlockIntervalIterator extends IntervalIterator {
+  private final IntervalIterator[] iterators;
+
+  private static final Interval INFINITE_INTERVAL = new Interval(
+      Integer.MIN_VALUE, Integer.MIN_VALUE, -1, -1);
+  private final Interval[] intervals;
+  private final Interval interval = new Interval(
+      Integer.MIN_VALUE, Integer.MIN_VALUE, -1, -1);
+  private final int[] gaps;
+
+  private final int lastIter;
+  private boolean collectLeaves = true;
+
+  public BlockIntervalIterator(boolean collectIntervals, boolean collectLeaves, IntervalIterator other) {
+    this(collectIntervals, other);
+    this.collectLeaves = collectLeaves;
+  }
+
+  /**
+   * Construct a BlockIntervalIterator over a compound IntervalIterator.  The
+   * sub-iterators must be in order and sequential for a match.
+   * @param collectIntervals <code>true</code> if intervals will be collected
+   * @param other the compound {@link IntervalIterator} used to extract the individual block iterators
+   */
+  public BlockIntervalIterator(boolean collectIntervals, IntervalIterator other) {
+    this(collectIntervals, defaultIncrements(other.subs(true).length), other);
+  }
+
+  /**
+   * Construct a BlockIntervalIterator over a compound IntervalIterator using
+   * a supplied increments array.
+   * @param collectIntervals <code>true</code> if intervals will be collected
+   * @param increments an array of position increments between the iterators
+   * @param other the compound {@link IntervalIterator} used to extract the individual block iterators 
+   */
+  public BlockIntervalIterator(boolean collectIntervals, int[] increments, IntervalIterator other) {
+    super(other.getScorer(), collectIntervals);
+    assert other.subs(true) != null;
+    iterators = other.subs(true);
+    assert iterators.length > 1;
+    intervals = new Interval[iterators.length];
+    lastIter = iterators.length - 1;
+    this.gaps = increments;
+  }
+
+  /**
+   * Construct a BlockIntervalIterator over a set of subiterators using a supplied
+   * increments array
+   * @param scorer the parent Scorer
+   * @param increments an array of position increments between the iterators
+   * @param collectIntervals true if intervals will be collected
+   * @param iterators the subiterators
+   */
+  public BlockIntervalIterator(Scorer scorer, int[] increments, boolean collectIntervals,
+                               IntervalIterator... iterators) {
+    super(scorer, collectIntervals);
+    assert iterators.length > 1;
+    this.iterators = iterators;
+    intervals = new Interval[iterators.length];
+    lastIter = iterators.length - 1;
+    this.gaps = increments;
+  }
+
+  /**
+   * Construct a BlockIntervalIterator over a set of subiterators
+   * @param scorer the parent Scorer
+   * @param collectIntervals true if intervals will be collected
+   * @param iterators the subiterators
+   */
+  public BlockIntervalIterator(Scorer scorer, boolean collectIntervals, IntervalIterator... iterators) {
+    this(scorer, defaultIncrements(iterators.length), collectIntervals, iterators);
+  }
+
+  private static int[] defaultIncrements(int num) {
+    int[] gaps = new int[num];
+    Arrays.fill(gaps, 1);
+    return gaps;
+  }
+
+  @Override
+  public Interval next() throws IOException {
+    if ((intervals[0] = iterators[0].next()) == null) {
+      return null;
+    }
+    int offset = 0;
+    for (int i = 1; i < iterators.length;) {
+      final int gap = gaps[i];
+      while (intervals[i].begin + gap <= intervals[i - 1].end) {
+        if ((intervals[i] = iterators[i].next()) == null) {
+          return null;
+        }
+      }
+      offset += gap;
+      if (intervals[i].begin == intervals[i - 1].end + gaps[i]) {
+        i++;
+        if (i < iterators.length && intervals[i] == INFINITE_INTERVAL) {
+          // advance only if really necessary
+          iterators[i].scorerAdvanced(docID());
+          assert iterators[i].docID() == docID();
+        }
+      } else {
+        do {
+          if ((intervals[0] = iterators[0].next()) == null) {
+            return null;
+          }
+        } while (intervals[0].begin < intervals[i].end - offset);
+
+        i = 1;
+      }
+    }
+    interval.begin = intervals[0].begin;
+    interval.end = intervals[lastIter].end;
+    interval.offsetBegin = intervals[0].offsetBegin;
+    interval.offsetEnd = intervals[lastIter].offsetEnd;
+    return interval;
+  }
+
+  @Override
+  public IntervalIterator[] subs(boolean inOrder) {
+    return iterators;
+  }
+
+  @Override
+  public void collect(IntervalCollector collector) {
+    assert collectIntervals;
+    collector.collectComposite(scorer, interval, docID());
+    if (collectLeaves) {
+      for (IntervalIterator iter : iterators) {
+        iter.collect(collector);
+      }
+    }
+  }
+
+  @Override
+  public int scorerAdvanced(int docId) throws IOException {
+    iterators[0].scorerAdvanced(docId);
+    assert iterators[0].docID() == docId;
+    iterators[1].scorerAdvanced(docId);
+    assert iterators[1].docID() == docId;
+    Arrays.fill(intervals, INFINITE_INTERVAL);
+    return docId;
+  }
+
+  @Override
+  public int matchDistance() {
+    return intervals[lastIter].begin - intervals[0].end;
+  }
+}
diff --git a/lucene/core/src/java/org/apache/lucene/search/intervals/BrouwerianIntervalIterator.java b/lucene/core/src/java/org/apache/lucene/search/intervals/BrouwerianIntervalIterator.java
new file mode 100644
index 0000000..b0de374
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/search/intervals/BrouwerianIntervalIterator.java
@@ -0,0 +1,105 @@
+package org.apache.lucene.search.intervals;
+
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.posfilter.Interval;
+
+import java.io.IOException;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * IntervalIterator based on minimum interval semantics for the Brouwerian
+ * operator. This {@link IntervalIterator} computes the difference <tt>M-S</tt>
+ * between the anti-chains M (minuend) and S (subtracted).
+ * <p>
+ * 
+ * 
+ * See <a href=
+ * "http://vigna.dsi.unimi.it/ftp/papers/EfficientAlgorithmsMinimalIntervalSemantics"
+ * >"Efficient Optimally Lazy Algorithms for Minimal-Interval Semantics"</a>
+ */
+public class BrouwerianIntervalIterator extends IntervalIterator {
+  
+  private final IntervalIterator minuend;
+  private final IntervalIterator subtracted;
+  private Interval subtractedInterval;
+  private Interval currentInterval;
+
+  /**
+   * Construct a new BrouwerianIntervalIterator over a minuend and a subtrahend
+   * IntervalIterator
+   * @param scorer the parent Scorer
+   * @param collectIntervals true if intervals will be collected
+   * @param minuend the minuend IntervalIterator
+   * @param subtracted the subtrahend IntervalIterator
+   */
+  public BrouwerianIntervalIterator(Scorer scorer, boolean collectIntervals, IntervalIterator minuend, IntervalIterator subtracted) {
+    super(scorer, collectIntervals);
+    this.minuend = minuend;
+    this.subtracted = subtracted;
+  }
+
+  @Override
+  public int scorerAdvanced(int docId) throws IOException {
+    minuend.scorerAdvanced(docId);
+    if (subtracted.docID() <= docId)
+      subtracted.scorerAdvanced(docId);
+    subtractedInterval = Interval.INFINITE_INTERVAL;
+    return docId;
+  }
+  
+  @Override
+  public Interval next() throws IOException {
+    if (subtracted.docID() != minuend.docID()) {
+      return currentInterval = minuend.next();
+    }
+    while ((currentInterval = minuend.next()) != null) {
+      while(subtractedInterval.lessThanExclusive(currentInterval) && (subtractedInterval = subtracted.next()) != null) {
+      }
+      if (subtractedInterval == null || !currentInterval.overlaps(subtractedInterval)) {
+        return currentInterval;
+      }
+    }
+    return currentInterval;
+  }
+  
+  @Override
+  public void collect(IntervalCollector collector) {
+    assert collectIntervals;
+    collector.collectComposite(scorer, currentInterval, docID());
+    minuend.collect(collector);
+    
+  }
+  
+  @Override
+  public IntervalIterator[] subs(boolean inOrder) {
+    return new IntervalIterator[] {minuend, subtracted};
+  }
+
+
+  @Override
+  public int matchDistance() {
+    return minuend.matchDistance();
+  }
+
+  @Override
+  public int docID() {
+    return minuend.docID();
+  }
+  
+}
diff --git a/lucene/core/src/java/org/apache/lucene/search/intervals/ConjunctionIntervalIterator.java b/lucene/core/src/java/org/apache/lucene/search/intervals/ConjunctionIntervalIterator.java
new file mode 100644
index 0000000..070b0fb
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/search/intervals/ConjunctionIntervalIterator.java
@@ -0,0 +1,175 @@
+package org.apache.lucene.search.intervals;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.intervals.IntervalQueue.IntervalRef;
+import org.apache.lucene.search.posfilter.Interval;
+
+import java.io.IOException;
+
+/**
+ * ConjuctionIntervalIterator based on minimal interval semantics for AND
+ * operator.
+ * 
+ * See <a href=
+ * "http://vigna.dsi.unimi.it/ftp/papers/EfficientAlgorithmsMinimalIntervalSemantics"
+ * >"Efficient Optimally Lazy Algorithms for Minimal-Interval Semantics"</a>
+ * 
+ * @lucene.experimental
+ */
+public final class ConjunctionIntervalIterator extends IntervalIterator {
+
+  private final IntervalQueueAnd queue;
+  private final int nrMustMatch;
+  private SnapshotPositionCollector snapshot;
+  private final IntervalIterator[] iterators;
+  private int rightExtremeBegin;
+
+
+  /**
+   * Create a new ConjunctionIntervalIterator over a set of subiterators
+   * @param scorer the parent scorer
+   * @param collectIntervals true if intervals will be collected
+   * @param iterators a list of iterators to combine
+   * @throws IOException if a low level I/O exception occurs
+   */
+  public ConjunctionIntervalIterator(Scorer scorer, boolean collectIntervals,
+      IntervalIterator... iterators) throws IOException {
+    this(scorer, collectIntervals, iterators.length, iterators);
+  }
+
+  /**
+   * Create a new ConjunctionIntervalIterator over a set of subiterators,
+   * with a minimum number of matching subiterators per document
+   * @param scorer the parent Scorer
+   * @param collectIntervals true if intervals will be collected
+   * @param minimuNumShouldMatch the number of subiterators that should
+   *                             match a document for a match to be returned
+   * @param iterators a list of iterators to combine
+   * @throws IOException if an low level I/O exception occurs
+   */
+  public ConjunctionIntervalIterator(Scorer scorer, boolean collectIntervals,
+      int minimuNumShouldMatch, IntervalIterator... iterators)
+      throws IOException {
+    super(scorer, collectIntervals);
+    this.iterators = iterators;
+    this.queue = new IntervalQueueAnd(iterators.length);
+    this.nrMustMatch = minimuNumShouldMatch;
+  }
+  
+  private void advance() throws IOException {
+    final IntervalRef top = queue.top();
+    Interval interval = null;
+    if ((interval = iterators[top.index].next()) != null) {
+      top.interval = interval;
+      queue.updateRightExtreme(top);
+      queue.updateTop();
+    } else {
+      queue.pop();
+    }
+  }
+  
+  @Override
+  public Interval next() throws IOException {
+    
+    while (queue.size() >= nrMustMatch
+        && queue.top().interval.begin == queue.currentCandidate.begin) {
+      advance();
+    }
+    if (queue.size() < nrMustMatch) {
+      return null;
+    }
+    do {
+      queue.updateCurrentCandidate();
+      Interval top = queue.top().interval; 
+      if (collectIntervals) {
+        snapShotSubPositions(); // this looks odd? -> see SnapShotCollector below for
+                                // details!
+      }
+      if (queue.currentCandidate.begin == top.begin
+          && queue.currentCandidate.end == top.end) {
+        return queue.currentCandidate;
+      }
+      rightExtremeBegin = queue.rightExtremeBegin;
+      advance();
+    } while (queue.size() >= nrMustMatch && queue.currentCandidate.end == queue.rightExtreme);
+    return queue.currentCandidate; // TODO support payloads
+  }
+  
+  
+  @Override
+  public int scorerAdvanced(final int docId) throws IOException {
+    if (docId == NO_MORE_DOCS) {
+      return NO_MORE_DOCS;
+    }
+    queue.reset();
+    for (int i = 0; i < iterators.length; i++) {
+      int scorerAdvanced = iterators[i].scorerAdvanced(docId);
+      if (scorerAdvanced != docId)
+        return scorerAdvanced;
+      assert scorerAdvanced == docId;
+      final Interval interval = iterators[i].next();
+      if (interval != null) {
+        IntervalRef intervalRef = new IntervalRef(interval, i); // TODO maybe
+                                                                // reuse?
+        queue.updateRightExtreme(intervalRef);
+        queue.add(intervalRef);
+      }
+    }
+    return docId;
+  }
+
+  @Override
+  public IntervalIterator[] subs(boolean inOrder) {
+    return iterators;
+  }
+  
+  
+  private void snapShotSubPositions() {
+    if (snapshot == null) {
+      snapshot = new SnapshotPositionCollector(queue.size());
+    }
+    snapshot.reset();
+    collectInternal(snapshot);
+  }
+  
+  private void collectInternal(IntervalCollector collector) {
+    assert collectIntervals;
+    collector.collectComposite(scorer, queue.currentCandidate, docID());
+    for (IntervalIterator iter : iterators) {
+      iter.collect(collector);
+    }
+    
+  }
+  
+  @Override
+  public void collect(IntervalCollector collector) {
+    assert collectIntervals;
+    if (snapshot == null) {
+      // we might not be initialized if the first interval matches
+      collectInternal(collector);
+    } else {
+      snapshot.replay(collector);
+    }
+  }
+
+  @Override
+  public int matchDistance() {
+    return (rightExtremeBegin) - (queue.currentTopEnd) -1; // align the match if pos are adjacent
+  }
+}
\ No newline at end of file
diff --git a/lucene/core/src/java/org/apache/lucene/search/intervals/DisjunctionIntervalIterator.java b/lucene/core/src/java/org/apache/lucene/search/intervals/DisjunctionIntervalIterator.java
new file mode 100644
index 0000000..5c9c6b2
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/search/intervals/DisjunctionIntervalIterator.java
@@ -0,0 +1,111 @@
+package org.apache.lucene.search.intervals;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.intervals.IntervalQueue.IntervalRef;
+import org.apache.lucene.search.posfilter.Interval;
+
+import java.io.IOException;
+
+/**
+ * DisjunctionPositionIterator based on minimal interval semantics for OR
+ * operator
+ * 
+ * <a href=
+ * "http://vigna.dsi.unimi.it/ftp/papers/EfficientAlgorithmsMinimalIntervalSemantics"
+ * >"Efficient Optimally Lazy Algorithms for Minimal-Interval Semantics"</a>
+ * 
+ * @lucene.experimental
+ */
+public final class DisjunctionIntervalIterator extends IntervalIterator {
+
+  private final IntervalQueue queue;
+  private final IntervalIterator[] iterators;
+
+  /**
+   * Creates a new DisjunctionIntervalIterator over a set of IntervalIterators
+   * @param scorer the parent Scorer
+   * @param collectIntervals <code>true</code> if intervals will be collected
+   * @param intervals the IntervalIterators to iterate over
+   * @throws IOException if a low-level I/O error is encountered
+   */
+  public DisjunctionIntervalIterator(Scorer scorer, boolean collectIntervals, IntervalIterator... intervals)
+      throws IOException {
+    super(scorer, collectIntervals);
+    this.iterators = intervals;
+    queue = new IntervalQueueOr(intervals.length);
+  }
+
+  private void advance() throws IOException {
+    final IntervalRef top = queue.top();
+    Interval interval = null;
+    if ((interval = iterators[top.index].next()) != null) {
+      top.interval = interval;
+      queue.updateTop();
+    } else {
+      queue.pop();
+    }
+  }
+
+  @Override
+  public Interval next() throws IOException {
+    while (queue.size() > 0 && queue.top().interval.begin <= queue.currentCandidate.begin) {
+      advance();
+    }
+    if (queue.size() == 0) {
+      return null;
+    }
+    queue.updateCurrentCandidate();
+    return queue.currentCandidate; // TODO support payloads
+  }
+
+  @Override
+  public IntervalIterator[] subs(boolean inOrder) {
+    return iterators;
+  }
+
+  @Override
+  public void collect(IntervalCollector collector) {
+    assert collectIntervals;
+    collector.collectComposite(scorer, queue.currentCandidate, docID());
+    iterators[queue.top().index].collect(collector);
+  }
+
+  @Override
+  public int scorerAdvanced(int docId) throws IOException {
+    queue.reset();
+    for (int i = 0; i < iterators.length; i++) {
+      if (iterators[i].docID() <= docId) {
+        int scorerAdvanced = iterators[i].scorerAdvanced(docId);
+        //assert iterators[i].docID() == scorerAdvanced : " " + iterators[i];
+      }
+      if (iterators[i].docID() == docId) {
+        Interval interval = iterators[i].next();
+        if (interval != null)
+          queue.add(new IntervalRef(interval, i));
+      }
+    }
+    return this.docID();
+  }
+
+  @Override
+  public int matchDistance() {
+    return iterators[queue.top().index].matchDistance();
+  }
+
+}
\ No newline at end of file
diff --git a/lucene/core/src/java/org/apache/lucene/search/intervals/IntervalCollector.java b/lucene/core/src/java/org/apache/lucene/search/intervals/IntervalCollector.java
new file mode 100644
index 0000000..e04f2cde
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/search/intervals/IntervalCollector.java
@@ -0,0 +1,44 @@
+package org.apache.lucene.search.intervals;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.posfilter.Interval;
+
+/**
+ * Used for collecting matching {@link org.apache.lucene.search.posfilter.Interval}s from a search
+ */
+public interface IntervalCollector {
+
+  /**
+   * Collects an individual term match
+   * @param scorer the parent scorer
+   * @param interval the interval to collect
+   * @param docID the docID of the document matched
+   */
+  public void collectLeafPosition(Scorer scorer, Interval interval, int docID);
+
+  /**
+   * Collects a composite interval that may have sub-intervals
+   * @param scorer the parent scorer
+   * @param interval the interval to collect
+   * @param docID the docID of the document matched
+   */
+  public void collectComposite(Scorer scorer, Interval interval, int docID);
+
+}
diff --git a/lucene/core/src/java/org/apache/lucene/search/intervals/IntervalFilter.java b/lucene/core/src/java/org/apache/lucene/search/intervals/IntervalFilter.java
new file mode 100644
index 0000000..9e8531a
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/search/intervals/IntervalFilter.java
@@ -0,0 +1,36 @@
+package org.apache.lucene.search.intervals;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Filters an {@link IntervalIterator}
+ *
+ * @see IntervalFilterQuery
+ */
+public interface IntervalFilter {
+
+  /**
+   * Filter the passed in IntervalIterator
+   * @param collectIntervals true if the returned {@link IntervalIterator} will
+   *                         be passed to an {@link IntervalCollector}
+   * @param iter the {@link IntervalIterator} to filter
+   * @return a filtered {@link IntervalIterator}
+   */
+  public abstract IntervalIterator filter(boolean collectIntervals, IntervalIterator iter);
+
+}
diff --git a/lucene/core/src/java/org/apache/lucene/search/intervals/IntervalFilterQuery.java b/lucene/core/src/java/org/apache/lucene/search/intervals/IntervalFilterQuery.java
new file mode 100644
index 0000000..3ecb8a6
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/search/intervals/IntervalFilterQuery.java
@@ -0,0 +1,352 @@
+package org.apache.lucene.search.intervals;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.TermContext;
+import org.apache.lucene.search.ComplexExplanation;
+import org.apache.lucene.search.Explanation;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.TermStatistics;
+import org.apache.lucene.search.Weight;
+import org.apache.lucene.search.Weight.PostingFeatures;
+import org.apache.lucene.search.posfilter.Interval;
+import org.apache.lucene.search.similarities.Similarity;
+import org.apache.lucene.util.Bits;
+
+import java.io.IOException;
+import java.util.Set;
+import java.util.TreeSet;
+
+/**
+ * A Query that filters the results of an inner {@link Query} using an
+ * {@link IntervalFilter}.
+ *
+ * @see org.apache.lucene.search.posfilter.OrderedNearQuery
+ * @see org.apache.lucene.search.posfilter.UnorderedNearQuery
+ * @see org.apache.lucene.search.posfilter.NonOverlappingQuery
+ *
+ * @lucene.experimental
+ */
+public class IntervalFilterQuery extends Query implements Cloneable {
+
+  private Query inner;
+  private final IntervalFilter filter;
+
+  /**
+   * Constructs a query using an inner query and an IntervalFilter
+   * @param inner the query to wrap
+   * @param filter the filter to restrict results by
+   */
+  public IntervalFilterQuery(Query inner, IntervalFilter filter) {
+    this.inner = inner;
+    this.filter = filter;
+  }
+
+  @Override
+  public void extractTerms(Set<Term> terms) {
+    inner.extractTerms(terms);
+  }
+
+  @Override
+  public Query rewrite(IndexReader reader) throws IOException {
+    IntervalFilterQuery clone = null;
+
+    Query rewritten =  inner.rewrite(reader);
+    if (rewritten != inner) {
+      clone = (IntervalFilterQuery) this.clone();
+      clone.inner = rewritten;
+    }
+
+    if (clone != null) {
+      return clone; // some clauses rewrote
+    } else {
+      return this; // no clauses rewrote
+    }
+  }
+
+  @Override
+  public Weight createWeight(IndexSearcher searcher) throws IOException {
+    return new IntervalFilterWeight(inner.createWeight(searcher), searcher);
+  }
+
+  class IntervalFilterWeight extends Weight {
+
+    private final Weight other;
+    private final Similarity similarity;
+    private final Similarity.SimWeight stats;
+
+    public IntervalFilterWeight(Weight other, IndexSearcher searcher) throws IOException {
+      this.other = other;
+      this.similarity = searcher.getSimilarity();
+      this.stats = getSimWeight(other.getQuery(), searcher);
+    }
+
+    private Similarity.SimWeight getSimWeight(Query query, IndexSearcher searcher)  throws IOException {
+      TreeSet<Term> terms = new TreeSet<Term>();
+      query.extractTerms(terms);
+      if (terms.size() == 0)
+        return null;
+      int i = 0;
+      TermStatistics[] termStats = new TermStatistics[terms.size()];
+      for (Term term : terms) {
+        TermContext state = TermContext.build(searcher.getTopReaderContext(), term);
+        termStats[i] = searcher.termStatistics(term, state);
+        i++;
+      }
+      final String field = terms.first().field(); // nocommit - should we be checking all filtered terms
+                                                  // are on the same field?
+      return similarity.computeWeight(query.getBoost(), searcher.collectionStatistics(field), termStats);
+
+    }
+
+    @Override
+    public Explanation explain(AtomicReaderContext context, int doc)
+        throws IOException {
+      Scorer scorer = scorer(context, true, false, PostingFeatures.POSITIONS,
+                              context.reader().getLiveDocs());
+      if (scorer != null) {
+        int newDoc = scorer.advance(doc);
+        if (newDoc == doc) {
+          float freq = scorer.freq();
+          Similarity.SimScorer docScorer = similarity.simScorer(stats, context);
+          ComplexExplanation result = new ComplexExplanation();
+          result.setDescription("weight("+getQuery()+" in "+doc+") [" + similarity.getClass().getSimpleName() + "], result of:");
+          Explanation scoreExplanation = docScorer.explain(doc, new Explanation(freq, "phraseFreq=" + freq));
+          result.addDetail(scoreExplanation);
+          result.setValue(scoreExplanation.getValue());
+          result.setMatch(true);
+          return result;
+        }
+      }
+      return new ComplexExplanation(false, 0.0f,
+          "No matching term within position filter");
+    }
+
+    @Override
+    public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder,
+        boolean topScorer, PostingFeatures flags, Bits acceptDocs) throws IOException {
+      if (stats == null)
+        return null;
+      flags = flags == PostingFeatures.DOCS_AND_FREQS ? PostingFeatures.POSITIONS : flags;
+      ScorerFactory factory = new ScorerFactory(other, context, topScorer, flags, acceptDocs);
+      final Scorer scorer = factory.scorer();
+      Similarity.SimScorer docScorer = similarity.simScorer(stats, context);
+      return scorer == null ? null : new IntervalFilterScorer(this, scorer, factory, docScorer);
+    }
+
+    @Override
+    public Query getQuery() {
+      return IntervalFilterQuery.this;
+    }
+    
+    @Override
+    public float getValueForNormalization() throws IOException {
+      return stats == null ? 1.0f : stats.getValueForNormalization();
+    }
+
+    @Override
+    public void normalize(float norm, float topLevelBoost) {
+      if (stats != null)
+        stats.normalize(norm, topLevelBoost);
+    }
+  }
+  
+  static class ScorerFactory {
+    final Weight weight;
+    final AtomicReaderContext context;
+    final boolean topScorer;
+    final PostingFeatures flags;
+    final Bits acceptDocs;
+    ScorerFactory(Weight weight,
+        AtomicReaderContext context, boolean topScorer, PostingFeatures flags,
+        Bits acceptDocs) {
+      this.weight = weight;
+      this.context = context;
+      this.topScorer = topScorer;
+      this.flags = flags;
+      this.acceptDocs = acceptDocs;
+    }
+    
+    public Scorer scorer() throws IOException {
+      return weight.scorer(context, true, topScorer, flags, acceptDocs);
+    }
+    
+  }
+
+  final static class CollectingFilteredIntervalIterator extends WrappedIntervalIterator {
+
+    CollectingFilteredIntervalIterator(Scorer scorer, IntervalFilter filter) throws IOException {
+      super(filter.filter(true, scorer.intervals(true)));
+    }
+
+    @Override
+    public int scorerAdvanced(int docId) throws IOException {
+      int target = scorer.advance(docId);
+      if (target > docId)
+        return target;
+      return inner.scorerAdvanced(target);
+    }
+
+    @Override
+    public String toString() {
+      return "CollectingFilteredIntervalIterator[" + inner + "]";
+    }
+  }
+
+  final class IntervalFilterScorer extends Scorer {
+
+    private final Scorer other;
+    private IntervalIterator filteredIterator;
+    private Interval current;
+    private final ScorerFactory factory;
+    private final Similarity.SimScorer docScorer;
+
+    public IntervalFilterScorer(Weight weight, Scorer other, ScorerFactory factory,
+                                Similarity.SimScorer docScorer) throws IOException {
+      super(weight);
+      this.other = other;
+      this.factory = factory;
+      this.filteredIterator = IntervalFilterQuery.this.filter.filter(false, other.intervals(false));
+      this.docScorer = docScorer;
+    }
+
+    @Override
+    public float score() throws IOException {
+      return docScorer.score(docID(), freq());
+    }
+
+    @Override
+    public long cost() {
+      return other.cost();
+    }
+
+    @Override
+    public IntervalIterator intervals(boolean collectIntervals) throws IOException {
+      if (collectIntervals) {
+        return new CollectingFilteredIntervalIterator(factory.scorer(), IntervalFilterQuery.this.filter);
+      }
+      
+      return new WrappedIntervalIterator(filteredIterator) {
+
+        private boolean buffered = true;
+        @Override
+        public int scorerAdvanced(int docId) throws IOException {
+          buffered = true;
+          assert docId == inner.docID();
+          return docId;
+        }
+
+        @Override
+        public Interval next() throws IOException {
+          if (buffered) {
+            buffered = false;
+            return current;
+          }
+          else if (current != null) {
+            return current = filteredIterator.next();
+          }
+          return null;
+        }
+        
+      };
+    }
+
+    @Override
+    public int docID() {
+      return other.docID();
+    }
+
+    @Override
+    public int nextDoc() throws IOException {
+      int docId = -1;
+      while ((docId = other.nextDoc()) != Scorer.NO_MORE_DOCS) {
+        filteredIterator.scorerAdvanced(docId);
+        if ((current = filteredIterator.next()) != null) { // just check if there is at least one interval that matches!
+          return other.docID();
+        }
+      }
+      return Scorer.NO_MORE_DOCS;
+    }
+
+    @Override
+    public int advance(int target) throws IOException {
+      int docId = other.advance(target);
+      if (docId == Scorer.NO_MORE_DOCS) {
+        return NO_MORE_DOCS;
+      }
+      do {
+        filteredIterator.scorerAdvanced(docId);
+        if ((current = filteredIterator.next()) != null) {
+          return other.docID();
+        }
+      } while ((docId = other.nextDoc()) != Scorer.NO_MORE_DOCS);
+      return NO_MORE_DOCS;
+    }
+
+    @Override
+    public int freq() throws IOException {
+      return 1; // nocommit how to calculate frequency?
+    }
+
+    public float sloppyFreq() throws IOException {
+      float freq = 0.0f;
+      do {
+        int d = filteredIterator.matchDistance();
+        freq += docScorer.computeSlopFactor(d);
+      }
+      while (filteredIterator.next() != null);
+      return freq;
+    }
+
+  }
+
+  @Override
+  public String toString(String field) {
+    return "Filtered(" + inner.toString() + ")";
+  }
+  
+  @Override
+  public int hashCode() {
+    final int prime = 31;
+    int result = super.hashCode();
+    result = prime * result + ((filter == null) ? 0 : filter.hashCode());
+    result = prime * result + ((inner == null) ? 0 : inner.hashCode());
+    return result;
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (this == obj) return true;
+    if (!super.equals(obj)) return false;
+    if (getClass() != obj.getClass()) return false;
+    IntervalFilterQuery other = (IntervalFilterQuery) obj;
+    if (filter == null) {
+      if (other.filter != null) return false;
+    } else if (!filter.equals(other.filter)) return false;
+    if (inner == null) {
+      if (other.inner != null) return false;
+    } else if (!inner.equals(other.inner)) return false;
+    return true;
+  }
+
+}
\ No newline at end of file
diff --git a/lucene/core/src/java/org/apache/lucene/search/intervals/IntervalIterator.java b/lucene/core/src/java/org/apache/lucene/search/intervals/IntervalIterator.java
new file mode 100644
index 0000000..fee9efc
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/search/intervals/IntervalIterator.java
@@ -0,0 +1,149 @@
+package org.apache.lucene.search.intervals;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.posfilter.Interval;
+
+import java.io.IOException;
+
+/**
+ * Iterator over the matching {@link org.apache.lucene.search.posfilter.Interval}s of a {@link Scorer}
+ *
+ * @lucene.experimental
+ */
+public abstract class IntervalIterator {
+
+  /** An empty array of IntervalIterators */
+  public static final IntervalIterator[] EMPTY = new IntervalIterator[0];
+
+  /** An IntervalIterator containing no further Intervals */
+  public static final IntervalIterator NO_MORE_INTERVALS = new EmptyIntervalIterator();
+
+  /** Integer representing no more documents */
+  public static final int NO_MORE_DOCS = Integer.MAX_VALUE;
+
+  protected final Scorer scorer;
+  protected final boolean collectIntervals;
+
+  /**
+   * Constructs an IntervalIterator over a {@link Scorer}
+   * @param scorer the {@link Scorer} to pull positions from
+   * @param collectIntervals true if positions will be collected
+   */
+  public IntervalIterator(Scorer scorer, boolean collectIntervals) {
+    this.scorer = scorer;
+    this.collectIntervals = collectIntervals;
+  }
+
+  /**
+   * Called after the parent scorer has been advanced.  If the scorer is
+   * currently positioned on docId, then subsequent calls to next() will
+   * return Intervals for that document; otherwise, no Intervals are
+   * available
+   * @param docId the document the parent scorer was advanced to
+   * @return the docId that the scorer is currently positioned at
+   * @throws IOException if a low-level I/O error is encountered
+   */
+  public abstract int scorerAdvanced(int docId) throws IOException;
+
+  /**
+   * Get the next Interval on the current document.
+   * @return the next Interval, or null if there are no remaining Intervals
+   * @throws IOException if a low-level I/O error is encountered
+   */
+  public abstract Interval next() throws IOException;
+
+  /**
+   * If intervals are to be collected, this will be called once
+   * for each Interval returned by the iterator.  The constructor
+   * must have been called with collectIntervals=true.
+   * @param collector an {@link IntervalCollector} to collect the
+   *                  Interval positions
+   * @see Scorer#intervals(boolean)                 
+   */
+  public abstract void collect(IntervalCollector collector);
+  
+  /**
+   * Get any sub-iterators
+   * 
+   * @param inOrder
+   *          true if the sub-iterators should be returned in the same order the
+   *          queries were provided
+   */
+  public abstract IntervalIterator[] subs(boolean inOrder);
+
+  /**
+   * Get the distance between matching subintervals
+   */
+  public abstract int matchDistance();
+
+  /**
+   * Get the current docID
+   */
+  public int docID() {
+    return scorer.docID();
+  }
+
+  /**
+   * Get this iterator's {@link Scorer}
+   */
+  public Scorer getScorer() {
+    return scorer;
+  }
+
+  /**
+   * An iterator that is always exhausted
+   */
+  private static final class EmptyIntervalIterator extends
+      IntervalIterator {
+    
+    public EmptyIntervalIterator() {
+      super(null, false);
+    }
+    
+    @Override
+    public int scorerAdvanced(int docId) throws IOException {
+      return IntervalIterator.NO_MORE_DOCS;
+    }
+    
+    @Override
+    public Interval next() throws IOException {
+      return null;
+    }
+    
+    @Override
+    public void collect(IntervalCollector collectoc) {}
+    
+    @Override
+    public IntervalIterator[] subs(boolean inOrder) {
+      return EMPTY;
+    }
+
+    @Override
+    public int matchDistance() {
+      return Integer.MAX_VALUE;
+    }
+
+    @Override
+    public int docID() {
+      return IntervalIterator.NO_MORE_DOCS;
+    }
+    
+  }
+  
+}
diff --git a/lucene/core/src/java/org/apache/lucene/search/intervals/IntervalQueue.java b/lucene/core/src/java/org/apache/lucene/search/intervals/IntervalQueue.java
new file mode 100644
index 0000000..7ed432a0
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/search/intervals/IntervalQueue.java
@@ -0,0 +1,72 @@
+package org.apache.lucene.search.intervals;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import org.apache.lucene.search.intervals.IntervalQueue.IntervalRef;
+import org.apache.lucene.search.posfilter.Interval;
+import org.apache.lucene.util.PriorityQueue;
+
+/**
+ * Abstract base class for calculating minimal spanning intervals with Queues.
+ * @see IntervalQueueAnd
+ *  
+ * @lucene.experimental
+ * @lucene.internal
+ */
+abstract class IntervalQueue extends PriorityQueue<IntervalRef> {
+  /**
+   * The current interval spanning the queue
+   */
+  final Interval currentCandidate = new Interval(
+      Integer.MIN_VALUE, Integer.MIN_VALUE, -1, -1);
+  
+  /**
+   * Creates a new {@link IntervalQueue} with a fixed size
+   * @param size the size of the queue
+   */
+  public IntervalQueue(int size) {
+    super(size);
+  }
+  
+  /**
+   * Clears and resets the queue to its initial values;
+   */
+  void reset() {
+    clear();
+    currentCandidate.reset();
+  }
+
+  /**
+   * Called by the consumer each time the head of the queue was updated
+   */
+  abstract void updateCurrentCandidate();
+
+  /**
+   * Holds a reference to an interval and its index.
+   */
+  final static class IntervalRef {
+    Interval interval;
+    final int index;
+
+    IntervalRef(Interval interval, int index) {
+      super();
+      this.interval = interval;
+      this.index = index;
+    }
+  }
+
+}
\ No newline at end of file
diff --git a/lucene/core/src/java/org/apache/lucene/search/intervals/IntervalQueueAnd.java b/lucene/core/src/java/org/apache/lucene/search/intervals/IntervalQueueAnd.java
new file mode 100644
index 0000000..3f00ae5
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/search/intervals/IntervalQueueAnd.java
@@ -0,0 +1,85 @@
+package org.apache.lucene.search.intervals;
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.search.posfilter.Interval;
+
+/**
+ * Queue class for calculating minimal spanning conjunction intervals
+ * @lucene.experimental
+ */ 
+final class IntervalQueueAnd extends IntervalQueue {
+  
+  /** the current right extreme positions of the queue */
+  int rightExtreme = Integer.MIN_VALUE;
+  /** the current right extreme offset of the queue */
+  int rightExtremeOffset = Integer.MIN_VALUE;
+  /** the current right extreme begin position*/
+  int rightExtremeBegin;  
+  /** the end of the internval on top of the queue*/
+  int currentTopEnd;
+  
+  /**
+   * Creates a new {@link IntervalQueueAnd} with a fixed size
+   * @param size the size of the queue
+   */
+  IntervalQueueAnd(int size) {
+    super(size);
+  }
+
+  @Override
+  void reset () {
+    super.reset();
+    rightExtreme = Integer.MIN_VALUE;
+    rightExtremeOffset = Integer.MIN_VALUE;
+  }
+  
+  /**
+   * Updates the right extreme of this queue if the end of the given interval is
+   * greater or equal than the current right extreme of the queue.
+   * 
+   * @param intervalRef the interval to compare
+   */
+  void updateRightExtreme(IntervalRef intervalRef) {
+    final Interval interval = intervalRef.interval;
+    if (rightExtreme <= interval.end) {
+      rightExtreme = interval.end;
+      rightExtremeOffset = interval.offsetEnd;
+      rightExtremeBegin = interval.begin;
+    }
+  }
+ 
+  @Override
+  void updateCurrentCandidate() {
+    final IntervalRef top = top();
+    Interval interval = top.interval;
+    currentCandidate.begin = interval.begin;
+    currentCandidate.offsetBegin = interval.offsetBegin;
+    currentCandidate.end = rightExtreme;
+    currentCandidate.offsetEnd = rightExtremeOffset;
+    currentTopEnd = interval.end;
+        
+  }
+  
+  @Override
+  protected boolean lessThan(IntervalRef left, IntervalRef right) {
+    final Interval a = left.interval;
+    final Interval b = right.interval;
+    return a.begin < b.begin || (a.begin == b.begin && a.end > b.end) || a.offsetBegin < b.offsetBegin;
+  }
+  
+}
diff --git a/lucene/core/src/java/org/apache/lucene/search/intervals/IntervalQueueOr.java b/lucene/core/src/java/org/apache/lucene/search/intervals/IntervalQueueOr.java
new file mode 100644
index 0000000..b3905c3
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/search/intervals/IntervalQueueOr.java
@@ -0,0 +1,47 @@
+package org.apache.lucene.search.intervals;
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.search.posfilter.Interval;
+
+/**
+ * Queue class for calculating minimal spanning disjunct intervals
+ * @lucene.experimental
+ */
+final class IntervalQueueOr extends IntervalQueue {
+  
+  /**
+   * Creates a new {@link IntervalQueueOr} with a fixed size
+   * @param size the size of the queue
+   */
+  IntervalQueueOr(int size) {
+    super(size);
+  }
+  
+  @Override
+  void updateCurrentCandidate() {
+    currentCandidate.copy(top().interval);
+  }
+  
+  @Override
+  protected boolean lessThan(IntervalRef left, IntervalRef right) {
+    final Interval a = left.interval;
+    final Interval b = right.interval;
+    return a.end < b.end || (a.end == b.end && a.begin >= b.begin);
+  }
+
+}
diff --git a/lucene/core/src/java/org/apache/lucene/search/intervals/OrderedConjunctionIntervalIterator.java b/lucene/core/src/java/org/apache/lucene/search/intervals/OrderedConjunctionIntervalIterator.java
new file mode 100644
index 0000000..982651f
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/search/intervals/OrderedConjunctionIntervalIterator.java
@@ -0,0 +1,167 @@
+package org.apache.lucene.search.intervals;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.posfilter.Interval;
+
+import java.io.IOException;
+
+/**
+ * An IntervalIterator based on minimum interval semantics for the
+ * AND< operator
+ *
+ * See <a href=
+ * "http://vigna.dsi.unimi.it/ftp/papers/EfficientAlgorithmsMinimalIntervalSemantics"
+ * >"Efficient Optimally Lazy Algorithms for Minimal-Interval Semantics"</a>
+ *
+ * @lucene.experimental
+ */
+public final class OrderedConjunctionIntervalIterator extends
+    IntervalIterator {
+
+  private final IntervalIterator[] iterators;
+  private final Interval[] intervals;
+  private final int lastIter;
+  private final Interval interval = new Interval();
+
+  private int index = 1;
+  private int matchDistance = 0;
+
+  private SnapshotPositionCollector snapshot = null;
+  private boolean collectLeaves = true;
+
+  /**
+   * Create an OrderedConjunctionIntervalIterator over a composite IntervalIterator
+   * @param collectIntervals true if intervals will be collected
+   * @param other a composite IntervalIterator to wrap
+   */
+  public OrderedConjunctionIntervalIterator(boolean collectIntervals, boolean collectLeaves, IntervalIterator other) {
+    this(other.scorer, collectIntervals, other.subs(true));
+    this.collectLeaves = collectLeaves;
+  }
+
+  public OrderedConjunctionIntervalIterator(boolean collectIntervals, IntervalIterator other) {
+    this(collectIntervals, true, other);
+  }
+
+  /**
+   * Create an OrderedConjunctionIntervalIterator over a set of subiterators
+   * @param scorer the parent Scorer
+   * @param collectIntervals true if intervals will be collected
+   * @param iterators the subintervals to wrap
+   */
+  public OrderedConjunctionIntervalIterator(Scorer scorer, boolean collectIntervals, IntervalIterator... iterators) {
+    super(scorer, collectIntervals);
+    this.iterators = iterators;
+    assert iterators.length > 1;
+    intervals = new Interval[iterators.length];
+    lastIter = iterators.length - 1;
+  }
+
+  @Override
+  public Interval next() throws IOException {
+    if(intervals[0] == null) {
+      return null;
+    }
+    interval.setMaximum();
+    int b = Integer.MAX_VALUE;
+    while (true) {
+      while (true) {
+        final Interval previous = intervals[index - 1];
+        if (previous.end >= b) {
+          return interval.begin == Integer.MAX_VALUE ? null : interval;
+        }
+        if (index == intervals.length || intervals[index].begin > previous.end) {
+          break;
+        }
+        Interval current = intervals[index];
+        do {
+          final Interval next;
+          if (current.end >= b || (next = iterators[index].next()) == null) {
+            return interval.begin == Integer.MAX_VALUE ? null : interval;
+          }
+          current = intervals[index] = next;
+        } while (current.begin <= previous.end);
+        index++;
+      }
+      interval.update(intervals[0], intervals[lastIter]);
+      matchDistance = (intervals[lastIter].begin - lastIter) - intervals[0].end;
+      b = intervals[lastIter].begin;
+      index = 1;
+      if (collectIntervals)
+        snapshotSubPositions();
+      intervals[0] = iterators[0].next();
+      if (intervals[0] == null) {
+        return interval.begin == Integer.MAX_VALUE ? null : interval;
+      }
+    }
+  }
+
+  @Override
+  public IntervalIterator[] subs(boolean inOrder) {
+    return iterators;
+  }
+
+  @Override
+  public void collect(IntervalCollector collector) {
+    assert collectIntervals;
+    if (snapshot == null) {
+      // we might not be initialized if the first interval matches
+      collectInternal(collector);
+    } else {
+      snapshot.replay(collector);
+    }
+  }
+
+  private void snapshotSubPositions() {
+    if (snapshot == null) {
+      snapshot = new SnapshotPositionCollector(iterators.length);
+    }
+    snapshot.reset();
+    collectInternal(snapshot);
+  }
+
+  private void collectInternal(IntervalCollector collector) {
+    assert collectIntervals;
+    collector.collectComposite(scorer, interval, docID());
+    if (collectLeaves) {
+      for (IntervalIterator iter : iterators) {
+        iter.collect(collector);
+      }
+    }
+  }
+
+  @Override
+  public int scorerAdvanced(int docId) throws IOException {
+    assert scorer.docID() == docId;
+    for (int i = 0; i < iterators.length; i++) {
+      int advanceTo = iterators[i].scorerAdvanced(docId);
+      assert advanceTo == docId;
+      intervals[i] = Interval.INFINITE_INTERVAL;
+    }
+    intervals[0] = iterators[0].next();
+    index = 1;
+    return scorer.docID();
+  }
+
+  @Override
+  public int matchDistance() {
+    return matchDistance;
+  }
+
+}
diff --git a/lucene/core/src/java/org/apache/lucene/search/intervals/RangeIntervalFilter.java b/lucene/core/src/java/org/apache/lucene/search/intervals/RangeIntervalFilter.java
new file mode 100644
index 0000000..1c95164
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/search/intervals/RangeIntervalFilter.java
@@ -0,0 +1,99 @@
+package org.apache.lucene.search.intervals;
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.search.posfilter.Interval;
+
+import java.io.IOException;
+
+/**
+ * An IntervalFilter that restricts Intervals returned by an IntervalIterator
+ * to those which occur between a given start and end position.
+ *
+ * @lucene.experimental
+ */
+public class RangeIntervalFilter implements IntervalFilter {
+
+  private int start;
+  private int end;
+
+  /**
+   * Constructs a new RangeIntervalFilter
+   * @param start the start of the filtered range
+   * @param end the end of the filtered range
+   */
+  public RangeIntervalFilter(int start, int end) {
+    this.start = start;
+    this.end = end;
+  }
+
+  @Override
+  public IntervalIterator filter(boolean collectIntervals, IntervalIterator iter) {
+    return new RangeIntervalIterator(collectIntervals, iter);
+  }
+
+  /**
+   * Wraps an IntervalIterator ignoring Intervals that fall outside a
+   * given range.
+   */
+  private class RangeIntervalIterator extends IntervalIterator {
+
+    private final IntervalIterator iterator;
+    private Interval interval;
+
+    RangeIntervalIterator(boolean collectIntervals, IntervalIterator iter) {
+      super(iter == null ? null : iter.scorer, collectIntervals);
+      this.iterator = iter;
+    }
+
+    @Override
+    public Interval next() throws IOException {
+      while ((interval = iterator.next()) != null) {
+        if(interval.end > end) {
+          return null;
+        } else if (interval.begin >= start) {
+          return interval;
+        }
+      }
+      return null;
+    }
+
+    @Override
+    public IntervalIterator[] subs(boolean inOrder) {
+      return new IntervalIterator[] { iterator };
+    }
+
+    @Override
+    public void collect(IntervalCollector collector) {
+      assert collectIntervals;
+      collector.collectComposite(null, interval, iterator.docID());
+      iterator.collect(collector);
+    }
+
+    @Override
+    public int scorerAdvanced(int docId) throws IOException {
+      return iterator.scorerAdvanced(docId);
+    }
+
+    @Override
+    public int matchDistance() {
+      return iterator.matchDistance();
+    }
+
+  }
+
+}
\ No newline at end of file
diff --git a/lucene/core/src/java/org/apache/lucene/search/intervals/SloppyIntervalIterator.java b/lucene/core/src/java/org/apache/lucene/search/intervals/SloppyIntervalIterator.java
new file mode 100644
index 0000000..37b0174
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/search/intervals/SloppyIntervalIterator.java
@@ -0,0 +1,236 @@
+package org.apache.lucene.search.intervals;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.posfilter.Interval;
+
+import java.io.IOException;
+
+/**
+ * An interval iterator that has the semantics of sloppy phrase query.
+ */
+public class SloppyIntervalIterator extends IntervalIterator {
+  private final int maxLen;
+  private int matchDistance;
+  private final IntervalIterator iterator;
+
+  /**
+   * Create a SloppyIntervalIterator that matches subiterators within
+   * a specified maxLength
+   * @param scorer the parent Scorer
+   * @param maxLength the maximum distance between the first and last subiterator match
+   * @param collectIntervals true if intervals will be collected
+   * @param iterators the subiterators
+   * @throws IOException if an low level I/O exception occurs
+   */
+  public SloppyIntervalIterator(Scorer scorer, int maxLength,
+      boolean collectIntervals, IntervalIterator... iterators)
+      throws IOException {
+    super(scorer, collectIntervals);
+    this.maxLen = maxLength;
+    this.iterator = new ConjunctionIntervalIterator(scorer, collectIntervals, iterators);
+  }
+  
+  @Override
+  public Interval next() throws IOException {
+    Interval current;
+    do {
+      if ((current = iterator.next()) != null) {
+        matchDistance = current.end - current.begin;
+        if (matchDistance <= maxLen) {
+//          System.out.println(matchDistance);
+          break;
+        }
+      } else {
+        break;
+      }
+    } while (true);
+    return current;
+  }
+  
+  @Override
+  public int scorerAdvanced(int docId) throws IOException {
+    return iterator.scorerAdvanced(docId);
+  }
+  
+  public int matchDistance() {
+    return matchDistance;
+  }
+  
+  public static IntervalIterator create(Scorer scorer, boolean collectIntervals,
+        IntervalIterator iterator, int... offsets) {
+    if (offsets.length == 1) {
+      return new SingleSlopplyIntervalIterator(scorer, collectIntervals, iterator, offsets[0]);
+    } else {
+      return new SloppyGroupIntervalIterator(scorer, collectIntervals, iterator, offsets);
+    }
+    
+  }
+  
+  private final static class SingleSlopplyIntervalIterator extends
+      IntervalIterator {
+    private Interval realInterval;
+    private final Interval sloppyInterval = new Interval();
+    private final IntervalIterator iterator;
+    private int offset;
+    
+    public SingleSlopplyIntervalIterator(Scorer scorer,
+        boolean collectIntervals, IntervalIterator iterator, int offset) {
+      super(scorer, collectIntervals);
+      this.iterator = iterator;
+      this.offset = offset;
+    }
+    
+    @Override
+    public int scorerAdvanced(int docId) throws IOException {
+      return iterator.scorerAdvanced(docId);
+    }
+    
+    @Override
+    public Interval next() throws IOException {
+      if ((realInterval = iterator.next()) != null) {
+        sloppyInterval.begin = sloppyInterval.end = realInterval.begin - offset;
+        sloppyInterval.offsetBegin = realInterval.offsetBegin;
+        sloppyInterval.offsetEnd = realInterval.offsetEnd;
+        return sloppyInterval;
+      }
+      return null;
+    }
+    
+    @Override
+    public void collect(IntervalCollector collector) {
+      collector.collectLeafPosition(scorer, realInterval, docID());
+      
+    }
+    
+    @Override
+    public IntervalIterator[] subs(boolean inOrder) {
+      return null;
+    }
+
+    @Override
+    public int matchDistance() {
+      return sloppyInterval.end - sloppyInterval.begin;
+    }
+    
+  }
+  
+  private final static class SloppyGroupIntervalIterator extends
+      IntervalIterator {
+    
+    private final Interval sloppyGroupInterval = new Interval();
+    private final int[] offsets;
+    private final Interval[] intervalPositions;
+    private final IntervalIterator groupIterator;
+    private int currentIndex;
+    private boolean initialized;
+    
+    public SloppyGroupIntervalIterator(Scorer scorer, boolean collectIntervals,
+        IntervalIterator groupIterator, int... offsets) {
+      super(scorer, collectIntervals);
+      this.offsets = offsets;
+      this.groupIterator = groupIterator;
+      this.intervalPositions = new Interval[offsets.length];
+      for (int i = 0; i < intervalPositions.length; i++) {
+        intervalPositions[i] = new Interval();
+      }
+    }
+    
+    @Override
+    public int scorerAdvanced(int docId) throws IOException {
+      initialized = false;
+      return groupIterator.scorerAdvanced(docId);
+    }
+    
+    @Override
+    public Interval next() throws IOException {
+      sloppyGroupInterval.begin = Integer.MAX_VALUE;
+      sloppyGroupInterval.end = Integer.MIN_VALUE;
+      if (!initialized) {
+        initialized = true;
+        
+        currentIndex = 0;
+        for (int i = 0; i < offsets.length; i++) {
+          Interval current;
+          if ((current = groupIterator.next()) != null) {
+            intervalPositions[i].copy(current);
+
+            int p = current.begin - offsets[i];
+            sloppyGroupInterval.begin = Math.min(sloppyGroupInterval.begin, p);
+            sloppyGroupInterval.end = Math.max(sloppyGroupInterval.end, p);
+          } else {
+            return null;
+          }
+        }
+        sloppyGroupInterval.offsetBegin = intervalPositions[0].offsetBegin;
+        sloppyGroupInterval.offsetEnd = intervalPositions[intervalPositions.length-1].offsetEnd;
+        return sloppyGroupInterval;
+      }
+      Interval current;
+      if ((current = groupIterator.next()) != null) {
+        final int currentFirst = currentIndex++ % intervalPositions.length;
+        intervalPositions[currentFirst].copy(current);
+        int currentIdx = currentIndex;
+        for (int i = 0; i < intervalPositions.length; i++) { // find min / max
+          int idx = currentIdx++ % intervalPositions.length;
+          int p = intervalPositions[idx].begin - offsets[i];
+          sloppyGroupInterval.begin = Math.min(sloppyGroupInterval.begin, p);
+          sloppyGroupInterval.end = Math.max(sloppyGroupInterval.end, p);
+        }
+        sloppyGroupInterval.offsetBegin = intervalPositions[currentIndex % intervalPositions.length].offsetBegin;
+        sloppyGroupInterval.offsetEnd = intervalPositions[currentFirst].offsetEnd;
+        return sloppyGroupInterval;
+      }
+      return null;
+    }
+    
+    @Override
+    public void collect(IntervalCollector collector) {
+      int currentIdx = currentIndex+1;
+      for (int i = 0; i < intervalPositions.length; i++) { // find min / max
+        int idx = currentIdx++ % intervalPositions.length;
+        collector.collectLeafPosition(scorer, intervalPositions[idx],
+            docID());
+      }
+      
+    }
+    
+    @Override
+    public IntervalIterator[] subs(boolean inOrder) {
+      return new IntervalIterator[] {groupIterator};
+    }
+
+    @Override
+    public int matchDistance() {
+      return sloppyGroupInterval.end - sloppyGroupInterval.begin;
+    }
+    
+  }
+  
+  @Override
+  public void collect(IntervalCollector collector) {
+    assert collectIntervals;
+    this.iterator.collect(collector);
+    
+  }
+  
+  @Override
+  public IntervalIterator[] subs(boolean inOrder) {
+    return null;
+  }
+}
diff --git a/lucene/core/src/java/org/apache/lucene/search/intervals/SnapshotPositionCollector.java b/lucene/core/src/java/org/apache/lucene/search/intervals/SnapshotPositionCollector.java
new file mode 100644
index 0000000..3b9a7dc
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/search/intervals/SnapshotPositionCollector.java
@@ -0,0 +1,115 @@
+package org.apache.lucene.search.intervals;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.posfilter.Interval;
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.RamUsageEstimator;
+
+/**
+ * An IntervalCollector that allows a snapshot of the state of an
+ * IntervalIterator to be taken before it is advanced.
+ *
+ * Conjunction iterators advance their subiterators before the consumer
+ * can call collect on the top level iterator.  If intervals are to be
+ * collected, we need to record the last possible match so that we can
+ * return the correct intervals for the match.
+ *
+ * @lucene.internal
+ */
+final class SnapshotPositionCollector implements IntervalCollector {
+
+  private SingleSnapshot[] snapshots;
+  private int index = 0;
+
+  /**
+   * Create a new collector with n snapshots
+   * @param subs the number of subiterators to record
+   */
+  SnapshotPositionCollector(int subs) {
+    snapshots = new SingleSnapshot[subs];
+  }
+
+  @Override
+  public void collectLeafPosition(Scorer scorer, Interval interval,
+      int docID) {
+    collect(scorer, interval, docID, true);
+
+  }
+
+  private void collect(Scorer scorer, Interval interval, int docID,
+      boolean isLeaf) {
+    if (snapshots.length <= index) {
+      grow(ArrayUtil.oversize(index + 1,
+          (RamUsageEstimator.NUM_BYTES_OBJECT_REF * 2)
+              + RamUsageEstimator.NUM_BYTES_OBJECT_HEADER
+              + RamUsageEstimator.NUM_BYTES_BOOLEAN
+              + RamUsageEstimator.NUM_BYTES_INT));
+    }
+    if (snapshots[index] == null) {
+      snapshots[index] = new SingleSnapshot();
+    }
+    snapshots[index++].set(scorer, interval, isLeaf, docID);
+  }
+
+  @Override
+  public void collectComposite(Scorer scorer, Interval interval,
+      int docID) {
+    collect(scorer, interval, docID, false);
+  }
+
+  void replay(IntervalCollector collector) {
+    for (int i = 0; i < index; i++) {
+      SingleSnapshot singleSnapshot = snapshots[i];
+      if (singleSnapshot.isLeaf) {
+        collector.collectLeafPosition(singleSnapshot.scorer,
+            singleSnapshot.interval, singleSnapshot.docID);
+      } else {
+        collector.collectComposite(singleSnapshot.scorer,
+            singleSnapshot.interval, singleSnapshot.docID);
+      }
+    }
+  }
+
+  void reset() {
+    index = 0;
+  }
+
+  private void grow(int size) {
+    final SingleSnapshot[] newArray = new SingleSnapshot[size];
+    System.arraycopy(snapshots, 0, newArray, 0, index);
+    snapshots = newArray;
+  }
+
+  private static final class SingleSnapshot {
+    Scorer scorer;
+    final Interval interval = new Interval();
+    boolean isLeaf;
+    int docID;
+
+    void set(Scorer scorer, Interval interval, boolean isLeaf,
+        int docID) {
+      this.scorer = scorer;
+      this.interval.copy(interval);
+      this.isLeaf = isLeaf;
+      this.docID = docID;
+    }
+  }
+
+}
diff --git a/lucene/core/src/java/org/apache/lucene/search/intervals/TermIntervalIterator.java b/lucene/core/src/java/org/apache/lucene/search/intervals/TermIntervalIterator.java
new file mode 100644
index 0000000..180dbb9
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/search/intervals/TermIntervalIterator.java
@@ -0,0 +1,126 @@
+package org.apache.lucene.search.intervals;
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.posfilter.Interval;
+
+import java.io.IOException;
+
+
+/**
+ * Iterates over the individual positions of a term in a document
+ */
+public final class TermIntervalIterator extends IntervalIterator {
+
+  private final Interval interval;
+  int positionsPending;
+  private final DocsEnum docsAndPos;
+  private int docID = -1;
+
+  /**
+   * Constructs a new TermIntervalIterator
+   * @param scorer the parent Scorer
+   * @param docsAndPos a DocsAndPositionsEnum positioned on the current document
+   * @param doPayloads true if payloads should be retrieved for the positions
+   * @param collectIntervals true if positions will be collected
+   */
+  public TermIntervalIterator(Scorer scorer, DocsEnum docsAndPos,
+                              boolean doPayloads, boolean collectIntervals) {
+    super(scorer, collectIntervals);
+    this.docsAndPos = docsAndPos;
+    this.interval = new Interval();
+  }
+
+  @Override
+  public Interval next() throws IOException {
+    if (--positionsPending >= 0) {
+      interval.begin = interval.end = docsAndPos.nextPosition();
+      interval.offsetBegin = docsAndPos.startOffset();
+      interval.offsetEnd = docsAndPos.endOffset();
+      return interval;
+    }
+    positionsPending = 0;
+    return null;
+  }
+
+  @Override
+  public int docID() {
+    return docID;
+  }
+
+  @Override
+  public IntervalIterator[] subs(boolean inOrder) {
+    return EMPTY;
+  }
+
+  @Override
+  public void collect(IntervalCollector collector) {
+    collector.collectLeafPosition(scorer, interval, docID);
+  }
+
+  @Override
+  public int scorerAdvanced(int docId) throws IOException {
+//    interval.reset();
+    if (docsAndPos.docID() == docId) {
+      positionsPending = docsAndPos.freq();
+    } else {
+      positionsPending = -1;
+    }
+    return docID = docsAndPos.docID();
+  }
+  
+  @Override
+  public String toString() {
+    return "TermPositions [interval=" + interval + ", positionsPending="
+        + positionsPending + ", docID=" + docID + "]";
+  }
+
+  @Override
+  public int matchDistance() {
+    return 0;
+  }
+// TODO not supported yet - need to figure out what that means really to support payloads
+//  private static final class PayloadInterval extends Interval {
+//    private int pos = -1;
+//    private final DocsAndPositionsEnum payloads;
+//    private final TermIntervalIterator termPos;
+//
+//    public PayloadInterval(DocsAndPositionsEnum payloads, TermIntervalIterator pos) {
+//      this.payloads = payloads;
+//      this.termPos = pos;
+//    }
+//
+//    @Override
+//    public BytesRef nextPayload() throws IOException {
+//      if (pos == termPos.positionsPending) {
+//        return null;
+//      } else {
+//        pos = termPos.positionsPending;
+//        return payloads.getPayload();
+//      }
+//    }
+//
+//    @Override
+//    public void reset() {
+//      super.reset();
+//      pos = -1;
+//    }
+//
+//  }
+}
\ No newline at end of file
diff --git a/lucene/core/src/java/org/apache/lucene/search/intervals/WithinIntervalFilter.java b/lucene/core/src/java/org/apache/lucene/search/intervals/WithinIntervalFilter.java
new file mode 100644
index 0000000..6e57d6e
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/search/intervals/WithinIntervalFilter.java
@@ -0,0 +1,119 @@
+package org.apache.lucene.search.intervals;
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import org.apache.lucene.search.posfilter.Interval;
+
+import java.io.IOException;
+
+
+/**
+ * An IntervalFilter that restricts Intervals returned by an IntervalIterator
+ * to those which have a matchDistance less than a defined slop.
+ *
+ * @lucene.experimental
+ */
+public class WithinIntervalFilter implements IntervalFilter {
+
+  private final int slop;
+  private boolean collectLeaves = true;
+
+  /**
+   * Construct a new WithinIntervalFilter
+   * @param slop the maximum slop allowed for subintervals
+   */
+  public WithinIntervalFilter(int slop) {
+    this.slop = slop;
+  }
+
+  /**
+   * Construct a new WithinIntervalFilter
+   * @param slop the maximum slop allowed for subintervals
+   */
+  public WithinIntervalFilter(int slop, boolean collectLeaves) {
+    this.slop = slop;
+    this.collectLeaves = collectLeaves;
+  }
+
+  /**
+   * @return the slop
+   */
+  public int getSlop() {
+    return slop;
+  }
+
+  @Override
+  public IntervalIterator filter(boolean collectIntervals, IntervalIterator iter) {
+    return new WithinIntervalIterator(collectIntervals, iter);
+  }
+
+  class WithinIntervalIterator extends IntervalIterator {
+
+    private IntervalIterator iterator;
+    private Interval interval;
+
+    WithinIntervalIterator(boolean collectIntervals, IntervalIterator iter) {
+      super(iter == null ? null : iter.scorer, collectIntervals);
+      this.iterator = iter;
+    }
+
+    @Override
+    public Interval next() throws IOException {
+      while ((interval = iterator.next()) != null) {
+        if((iterator.matchDistance()) <= slop){
+          return interval;
+        }
+      }
+      return null;
+    }
+
+    @Override
+    public IntervalIterator[] subs(boolean inOrder) {
+      return new IntervalIterator[] {iterator};
+    }
+
+
+    @Override
+    public void collect(IntervalCollector collector) {
+      assert collectIntervals;
+      collector.collectComposite(null, interval, iterator.docID());
+      if (collectLeaves)
+        iterator.collect(collector);
+    }
+
+    @Override
+    public int scorerAdvanced(int docId) throws IOException {
+      return iterator.scorerAdvanced(docId);
+    }
+
+    @Override
+    public int matchDistance() {
+      return iterator.matchDistance();
+    }
+
+    @Override
+    public String toString() {
+      return "WithinIntervalIterator[" + iterator.docID() + ":" + interval + "]";
+    }
+
+    @Override
+    public int docID() {
+      return iterator.docID();
+    }
+
+  }
+
+}
diff --git a/lucene/core/src/java/org/apache/lucene/search/intervals/WithinOrderedFilter.java b/lucene/core/src/java/org/apache/lucene/search/intervals/WithinOrderedFilter.java
new file mode 100644
index 0000000..57f4494
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/search/intervals/WithinOrderedFilter.java
@@ -0,0 +1,56 @@
+package org.apache.lucene.search.intervals;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * An IntervalFilter that restricts an IntervalIterator to return
+ * only Intervals that occur in order within a given distance.
+ *
+ * @see WithinIntervalFilter
+ */
+public class WithinOrderedFilter implements IntervalFilter {
+
+  private final WithinIntervalFilter innerFilter;
+  private final boolean collectLeaves;
+
+  /**
+   * Constructs a new WithinOrderedFilter with a given slop
+   * @param slop The maximum distance allowed between subintervals
+   * @param collectLeaves false if only the parent interval should be collected
+   */
+  public WithinOrderedFilter(int slop, boolean collectLeaves) {
+    this.innerFilter = new WithinIntervalFilter(slop);
+    this.collectLeaves = collectLeaves;
+  }
+
+  public WithinOrderedFilter(int slop) {
+    this(slop, true);
+  }
+
+  @Override
+  public IntervalIterator filter(boolean collectIntervals, IntervalIterator iter) {
+    return innerFilter.filter(collectIntervals,
+                              new OrderedConjunctionIntervalIterator(collectIntervals, collectLeaves, iter));
+  }
+
+  @Override
+  public String toString() {
+    return "WithinOrderedFilter[" + this.innerFilter.getSlop() + "]";
+  }
+
+}
diff --git a/lucene/core/src/java/org/apache/lucene/search/intervals/WrappedIntervalIterator.java b/lucene/core/src/java/org/apache/lucene/search/intervals/WrappedIntervalIterator.java
new file mode 100644
index 0000000..e011836
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/search/intervals/WrappedIntervalIterator.java
@@ -0,0 +1,63 @@
+package org.apache.lucene.search.intervals;
+
+import org.apache.lucene.search.posfilter.Interval;
+
+import java.io.IOException;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class WrappedIntervalIterator extends IntervalIterator {
+
+  protected final IntervalIterator inner;
+
+  protected WrappedIntervalIterator(IntervalIterator inner) {
+    super(inner.scorer, inner.collectIntervals);
+    this.inner = inner;
+  }
+
+  @Override
+  public int scorerAdvanced(int docId) throws IOException {
+    return inner.scorerAdvanced(docId);
+  }
+
+  @Override
+  public Interval next() throws IOException {
+    return inner.next();
+  }
+
+  @Override
+  public void collect(IntervalCollector collector) {
+    inner.collect(collector);
+  }
+
+  @Override
+  public IntervalIterator[] subs(boolean inOrder) {
+    return inner.subs(inOrder);
+  }
+
+  @Override
+  public int matchDistance() {
+    return inner.matchDistance();
+  }
+
+  @Override
+  public int docID() {
+    return inner.docID();
+  }
+
+}
diff --git a/lucene/core/src/java/org/apache/lucene/search/intervals/package.html b/lucene/core/src/java/org/apache/lucene/search/intervals/package.html
new file mode 100644
index 0000000..75eac5f
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/search/intervals/package.html
@@ -0,0 +1,70 @@
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+  -->
+
+<HTML>
+<HEAD>
+    <TITLE>org.apache.lucene.search.intervals</TITLE>
+</HEAD>
+<BODY>
+<h2>Interval Iterators</h2>
+<p>
+Lucene offers extensive query and scoring flexibility including boolean queries, specialized phrase queries, wildcards and many more. The intervals package aims
+to provide a common interface to Lucene's proximity features available on all core queries. The central class in this package is
+{@link org.apache.lucene.search.intervals.IntervalIterator IntervalIterator}, which allows iterative consumption of term positions and offsets on complex queries.
+{@link org.apache.lucene.search.Scorer Scorer} exposes direct access to the queries' {@link org.apache.lucene.search.intervals.IntervalIterator IntervalIterator} reflecting a logical view
+of the scorer on positions and offsets for each matching document.</p>
+<p>
+Intervals are entirely detached from scoring/matching documents and have no effect on query performance if proximity information or offsets are not needed or consumed. Its lazy nature requires
+the user to specify the need for positions/offsets at scorer creation time per segment allowing for a large number of usecases:
+
+<ul>
+<li>Proximity matching without scoring ie. if token positions are needed for filtering out documents but the actual query score should not be modified</li>
+<li>Second pass scoring ie. for high-performance proximity queries common practice is to re-score the top N (usually a large N) results of a non-proximity query with proximity information to improve precision.</li>
+<li>Collecting an exhaustive list of intervals per query ie. complex queries might be interested in actual term positions across the entire query tree</li>
+<li>Highlighting queries without re-analyzing the document or storing term vectors if offsets are stored in the index. Especially large documents will see a tremendous performance and space-consumption improvement over term-vectors / re-analyzing</li>
+<li>Specializing queries for exotic proximity operators based on core queries</li>
+</ul>
+
+<h2>Core Iterators and Queries</h2>
+
+The intervals package provides a basic set of {@link org.apache.lucene.search.intervals.IntervalIterator IntervalIterator} and {@link org.apache.lucene.search.Query Query} implementation 
+based on minimum interval semantics, as defined in
+<a href="http://vigna.dsi.unimi.it/ftp/papers/EfficientAlgorithmsMinimalIntervalSemantics">"Efficient Optimally Lazy Algorithms for Minimal-Interval Semantics"</a>
+<p>
+  The following {@link org.apache.lucene.search.intervals.IntervalIterator IntervalIterator} implementations are provided:
+  <ol>
+    <li>{@link org.apache.lucene.search.intervals.BlockIntervalIterator - BlockIntervalIterator} -- an iterator providing an ordered <i>phrasal operator</i> with given gaps between sub-iterators</li>
+    <li>{@link org.apache.lucene.search.intervals.OrderedConjunctionIntervalIterator - OrderedConjunctionIntervalIterator} -- an iterator providing an <i>ordered non-overlapping conjunction operator</i></li>
+    <li>{@link org.apache.lucene.search.intervals.ConjunctionIntervalIterator - ConjunctionIntervalIterator} -- an iterator providing a <i>unordered conjunction operator</i></li>
+    <li>{@link org.apache.lucene.search.intervals.BrouwerianIntervalIterator - BrouwerianIntervalIterator} -- an iterator computing the non-overlapping difference between two iterators</li>
+    <li>{@link org.apache.lucene.search.intervals.DisjunctionIntervalIterator - DisjunctionIntervalIterator} -- an iterator providing a <i>unordered disjunction operator</i></li>
+  </ol>
+  All queries require positions to be stored in the index.
+</p>
+
+<p>
+  The following Query implementations are provided:
+  <ol>
+    <li>{@link org.apache.lucene.search.intervals.IntervalFilterQuery - IntervalFilterQuery} -- Filters a Query based on the positions or ranges of its component parts</li>
+    <li>{@link org.apache.lucene.search.intervals.OrderedNearQuery - OrderedNearQuery} -- Filters queries based on the ordered difference between their match positions in a document</li>
+    <li>{@link org.apache.lucene.search.intervals.UnorderedNearQuery - UnorderedNearQuery} -- Filters queries based on the unordered difference between their match positions in a document</li>
+    <li>{@link org.apache.lucene.search.intervals.NonOverlappingQuery - NonOverlappingQuery} -- Filters out queries with overlapping match positions</li>
+  </ol>
+  All queries require positions to be stored in the index.
+</p>
+</BODY>
+</HTML>
diff --git a/lucene/core/src/java/org/apache/lucene/search/package.html b/lucene/core/src/java/org/apache/lucene/search/package.html
index 1a9e577..dce0838 100644
--- a/lucene/core/src/java/org/apache/lucene/search/package.html
+++ b/lucene/core/src/java/org/apache/lucene/search/package.html
@@ -430,8 +430,9 @@
                 that scores via a {@link org.apache.lucene.search.similarities.Similarity Similarity} will just defer to the Similarity's implementation:
                 {@link org.apache.lucene.search.similarities.Similarity.SimWeight#normalize SimWeight#normalize(float,float)}.</li>
             <li>
-                {@link org.apache.lucene.search.Weight#scorer(org.apache.lucene.index.AtomicReaderContext, boolean, boolean, org.apache.lucene.util.Bits)
-                  scorer(AtomicReaderContext context, boolean scoresDocsInOrder, boolean topScorer, Bits acceptDocs)} —
+                {@link org.apache.lucene.search.Weight#scorer(org.apache.lucene.index.AtomicReaderContext, boolean, boolean,
+                  org.apache.lucene.search.Weight.PostingFeatures, org.apache.lucene.util.Bits)
+                  scorer(AtomicReaderContext context, boolean scoresDocsInOrder, boolean topScorer, PostingFeatures flags, Bits acceptDocs)} —
                 Construct a new {@link org.apache.lucene.search.Scorer Scorer} for this Weight. See <a href="#scorerClass">The Scorer Class</a>
                 below for help defining a Scorer. As the name implies, the Scorer is responsible for doing the actual scoring of documents 
                 given the Query.
diff --git a/lucene/core/src/java/org/apache/lucene/search/payloads/PayloadNearQuery.java b/lucene/core/src/java/org/apache/lucene/search/payloads/PayloadNearQuery.java
index 31034ea..c217eb1 100644
--- a/lucene/core/src/java/org/apache/lucene/search/payloads/PayloadNearQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/payloads/PayloadNearQuery.java
@@ -149,14 +149,14 @@
 
     @Override
     public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder,
-        boolean topScorer, Bits acceptDocs) throws IOException {
+        boolean topScorer, PostingFeatures flags, Bits acceptDocs) throws IOException {
       return new PayloadNearSpanScorer(query.getSpans(context, acceptDocs, termContexts), this,
           similarity, similarity.simScorer(stats, context));
     }
     
     @Override
     public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
-      PayloadNearSpanScorer scorer = (PayloadNearSpanScorer) scorer(context, true, false, context.reader().getLiveDocs());
+      PayloadNearSpanScorer scorer = (PayloadNearSpanScorer) scorer(context, true, false, PostingFeatures.POSITIONS, context.reader().getLiveDocs());
       if (scorer != null) {
         int newDoc = scorer.advance(doc);
         if (newDoc == doc) {
diff --git a/lucene/core/src/java/org/apache/lucene/search/payloads/PayloadTermQuery.java b/lucene/core/src/java/org/apache/lucene/search/payloads/PayloadTermQuery.java
index b263999..3943d8a 100644
--- a/lucene/core/src/java/org/apache/lucene/search/payloads/PayloadTermQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/payloads/PayloadTermQuery.java
@@ -19,7 +19,7 @@
 
 import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.Term;
-import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.index.DocsEnum;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Scorer;
 import org.apache.lucene.search.Weight;
@@ -80,7 +80,7 @@
 
     @Override
     public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder,
-        boolean topScorer, Bits acceptDocs) throws IOException {
+        boolean topScorer, PostingFeatures flags, Bits acceptDocs) throws IOException {
       return new PayloadTermSpanScorer((TermSpans) query.getSpans(context, acceptDocs, termContexts),
           this, similarity.simScorer(stats, context));
     }
@@ -121,7 +121,7 @@
 
       protected void processPayload(Similarity similarity) throws IOException {
         if (termSpans.isPayloadAvailable()) {
-          final DocsAndPositionsEnum postings = termSpans.getPostings();
+          final DocsEnum postings = termSpans.getPostings();
           payload = postings.getPayload();
           if (payload != null) {
             payloadScore = function.currentScore(doc, term.field(),
@@ -177,7 +177,7 @@
     
     @Override
     public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
-      PayloadTermSpanScorer scorer = (PayloadTermSpanScorer) scorer(context, true, false, context.reader().getLiveDocs());
+      PayloadTermSpanScorer scorer = (PayloadTermSpanScorer) scorer(context, true, false, PostingFeatures.POSITIONS, context.reader().getLiveDocs());
       if (scorer != null) {
         int newDoc = scorer.advance(doc);
         if (newDoc == doc) {
diff --git a/lucene/core/src/java/org/apache/lucene/search/posfilter/BlockPhraseScorer.java b/lucene/core/src/java/org/apache/lucene/search/posfilter/BlockPhraseScorer.java
new file mode 100644
index 0000000..a9d6e19
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/search/posfilter/BlockPhraseScorer.java
@@ -0,0 +1,70 @@
+package org.apache.lucene.search.posfilter;
+
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.similarities.Similarity;
+
+import java.io.IOException;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class BlockPhraseScorer extends PositionFilteredScorer {
+
+  private final Interval[] subIntervals;
+
+  public BlockPhraseScorer(Scorer filteredScorer, Similarity.SimScorer simScorer) {
+    super(filteredScorer, simScorer);
+    subIntervals = new Interval[subScorers.length];
+    for (int i = 0; i < subScorers.length; i++) {
+      subIntervals[i] = new Interval();
+    }
+  }
+
+  @Override
+  public void reset(int doc) throws IOException {
+    super.reset(doc);
+    for (int i = 0; i < subScorers.length; i++) {
+      subIntervals[i].reset();
+    }
+  }
+
+  @Override
+  protected int doNextPosition() throws IOException {
+    if (subScorers[0].nextPosition() == NO_MORE_POSITIONS)
+      return NO_MORE_POSITIONS;
+    subIntervals[0].update(subScorers[0]);
+    int i = 1;
+    while (i < subScorers.length) {
+      while (subIntervals[i].begin <= subIntervals[i - 1].end) {
+        if (subScorers[i].nextPosition() == NO_MORE_POSITIONS)
+          return NO_MORE_POSITIONS;
+        subIntervals[i].update(subScorers[i]);
+      }
+      if (subIntervals[i].begin == subIntervals[i - 1].end + 1) {
+        i++;
+      }
+      else {
+        if (subScorers[0].nextPosition() == NO_MORE_POSITIONS)
+          return NO_MORE_POSITIONS;
+        subIntervals[0].update(subScorers[0]);
+        i = 1;
+      }
+    }
+    current.update(subIntervals[0], subIntervals[subScorers.length - 1]);
+    return subScorers[0].startPosition();
+  }
+}
diff --git a/lucene/core/src/java/org/apache/lucene/search/posfilter/Interval.java b/lucene/core/src/java/org/apache/lucene/search/posfilter/Interval.java
new file mode 100644
index 0000000..b29f85c
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/search/posfilter/Interval.java
@@ -0,0 +1,200 @@
+package org.apache.lucene.search.posfilter;
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.index.DocsEnum;
+
+import java.io.IOException;
+
+/**
+ * Represents a section of a document that matches a query
+ */
+public class Interval implements Cloneable {
+
+  /** The position of the start of this Interval */
+  public int begin;
+
+  /** The position of the end of this Interval */
+  public int end;
+
+  /** The offset of the start of this Interval */
+  public int offsetBegin;
+
+  /** The offset of the end of this Interval */
+  public int offsetEnd;
+
+  /** An interval that will always compare as less than any other interval */
+  public static final Interval INFINITE_INTERVAL = new Interval();
+
+   /**
+   * Constructs a new Interval
+   * @param begin the start position
+   * @param end the end position
+   * @param offsetBegin the start offset
+   * @param offsetEnd the end offset
+   */
+  public Interval(int begin, int end, int offsetBegin, int offsetEnd) {
+    this.begin = begin;
+    this.end = end;
+    this.offsetBegin = offsetBegin;
+    this.offsetEnd = offsetEnd;
+  }
+
+  /**
+   * Constructs a new Interval with no initial values.  This
+   * will always compare as less than any other Interval.
+   */
+  public Interval() {
+    this(Integer.MIN_VALUE, Integer.MIN_VALUE, -1, -1);
+  }
+
+  public Interval(DocsEnum docsEnum) throws IOException {
+    this.begin = docsEnum.startPosition();
+    this.end = docsEnum.endPosition();
+    this.offsetBegin = docsEnum.startOffset();
+    this.offsetEnd = docsEnum.endOffset();
+  }
+
+  /**
+   * Update to span the range defined by two other Intervals.
+   * @param start the first Interval
+   * @param end the second Interval
+   */
+  public void update(Interval start, Interval end) {
+    this.begin = start.begin;
+    this.offsetBegin = start.offsetBegin;
+    this.end = end.end;
+    this.offsetEnd = end.offsetEnd;
+  }
+
+  /**
+   * Compare with another Interval.
+   * @param other the comparator
+   * @return true if both start and end positions are less than
+   *              the comparator.
+   */
+  public boolean lessThanExclusive(Interval other) {
+    return begin < other.begin && end < other.end;
+  }
+
+  /**
+   * Compare with another Interval.
+   * @param other the comparator
+   * @return true if both start and end positions are less than
+   *              or equal to the comparator's.
+   */
+  public boolean lessThan(Interval other) {
+    return begin <= other.begin && end <= other.end;
+  }
+
+  /**
+   * Compare with another Interval
+   * @param other the comparator
+   * @return true if both start and end positions are greater then
+   *              the comparator's.
+   */
+  public boolean greaterThanExclusive(Interval other) {
+    return begin > other.begin && end > other.end;
+  }
+
+  /**
+   * Compare with another Interval
+   * @param other the comparator
+   * @return true if both start and end positions are greater then
+   *              of equal to the comparator's.
+   */
+  public boolean greaterThan(Interval other) {
+    return begin >= other.begin && end >= other.end;
+  }
+
+  /**
+   * Compare with another Interval
+   * @param other the comparator
+   * @return true if this Interval contains the comparator
+   */
+  public boolean contains(Interval other) {
+    return begin <= other.begin && other.end <= end;
+  }
+
+  /**
+   * Compare with another Interval to find overlaps
+   * @param other
+   * @return true if the two intervals overlap
+   */
+  public boolean overlaps(Interval other) {
+    return this.contains(other) || other.contains(this);
+  }
+
+  /**
+   * Set all values of this Interval to be equal to another's
+   * @param other the Interval to copy
+   */
+  public void copy(Interval other) {
+    begin = other.begin;
+    end = other.end;
+    offsetBegin = other.offsetBegin;
+    offsetEnd = other.offsetEnd;
+  }
+
+  /**
+   * Set to a state that will always compare as less than any
+   * other Interval.
+   */
+  public void reset() {
+    offsetBegin = offsetEnd = -1;
+    begin = end = Integer.MIN_VALUE;
+  }
+
+  /**
+   * Set to a state that will always compare as more than any
+   * other Interval.
+   */
+  public void setMaximum() {
+    offsetBegin = offsetEnd = -1;
+    begin = end = Integer.MAX_VALUE;
+  }
+  
+  @Override
+  public Object clone() {
+    try {
+      return super.clone();
+    } catch (CloneNotSupportedException e) {
+      throw new RuntimeException(); // should not happen
+    }
+  }
+  
+  @Override
+  public String toString() {
+    return "Interval [begin=" + begin + "(" + offsetBegin + "), end="
+        + end + "(" + offsetEnd + ")]";
+  }
+
+  public void update(DocsEnum docsEnum) throws IOException {
+    offsetBegin = docsEnum.startOffset();
+    offsetEnd = docsEnum.endOffset();
+    begin = docsEnum.startPosition();
+    end = docsEnum.endPosition();
+  }
+
+  public void update(Interval interval) {
+    this.begin = interval.begin;
+    this.end = interval.end;
+    this.offsetBegin = interval.offsetBegin;
+    this.offsetEnd = interval.offsetEnd;
+  }
+
+}
\ No newline at end of file
diff --git a/lucene/core/src/java/org/apache/lucene/search/posfilter/NonOverlappingQuery.java b/lucene/core/src/java/org/apache/lucene/search/posfilter/NonOverlappingQuery.java
new file mode 100644
index 0000000..4cba670
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/search/posfilter/NonOverlappingQuery.java
@@ -0,0 +1,206 @@
+package org.apache.lucene.search.posfilter;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.Weight;
+import org.apache.lucene.search.similarities.Similarity;
+import org.apache.lucene.util.Bits;
+
+import java.io.IOException;
+import java.util.Set;
+
+/**
+ * A Query that matches documents containing an interval (the minuend) that
+ * does not contain another interval (the subtrahend).
+ *
+ * As an example, given the following {@link org.apache.lucene.search.BooleanQuery}:
+ * <pre>
+ *   BooleanQuery bq = new BooleanQuery();
+ *   bq.add(new TermQuery(new Term(field, "quick")), BooleanQuery.Occur.MUST);
+ *   bq.add(new TermQuery(new Term(field, "fox")), BooleanQuery.Occur.MUST);
+ * </pre>
+ *
+ * The document "the quick brown fox" will be matched by this query.  But
+ * create a NonOverlappingQuery using this query as a minuend:
+ * <pre>
+ *   NonOverlappingQuery brq = new NonOverlappingQuery(bq, new TermQuery(new Term(field, "brown")));
+ * </pre>
+ *
+ * This query will not match "the quick brown fox", because "brown" is found
+ * within the interval of the boolean query for "quick" and "fox.  The query
+ * will match "the quick fox is brown", because here "brown" is outside
+ * the minuend's interval.
+ *
+ * N.B. Positions must be included in the index for this query to work
+ *
+ * Implements the Brouwerian operator as defined in <a href=
+ * "http://vigna.dsi.unimi.it/ftp/papers/EfficientAlgorithmsMinimalIntervalSemantics"
+ * >"Efficient Optimally Lazy Algorithms for Minimal-Interval Semantics"</a>
+ *
+ * @lucene.experimental
+ * @see org.apache.lucene.search.intervals.BrouwerianIntervalIterator
+ */
+public final class NonOverlappingQuery extends PositionFilterQuery {
+
+  private Query subtrahend;
+
+  /**
+   * Constructs a Query that matches documents containing intervals of the minuend
+   * that are not subtended by the subtrahend
+   * @param minuend the minuend Query
+   * @param subtrahend the subtrahend Query
+   */
+  public NonOverlappingQuery(Query minuend, Query subtrahend) {
+    super(minuend, new BrouwerianScorerFactory(subtrahend));
+    this.subtrahend = subtrahend;
+  }
+
+  @Override
+  public void extractTerms(Set<Term> terms) {
+    super.extractTerms(terms);
+    subtrahend.extractTerms(terms);
+  }
+
+  @Override
+  public Query rewrite(IndexReader reader) throws IOException {
+    Query rewrittenMinuend = innerQuery.rewrite(reader);
+    Query rewrittenSubtrahend = subtrahend.rewrite(reader);
+    if (rewrittenMinuend != innerQuery || rewrittenSubtrahend != subtrahend) {
+      return new NonOverlappingQuery(rewrittenMinuend, rewrittenSubtrahend);
+    }
+    return this;
+  }
+
+  private static class BrouwerianScorerFactory implements ScorerFilterFactory {
+
+    private final Query subtrahend;
+
+    BrouwerianScorerFactory(Query subtrahend) {
+      this.subtrahend = subtrahend;
+    }
+
+    @Override
+    public Scorer scorer(Scorer filteredScorer, Similarity.SimScorer simScorer) {
+      throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public String getName() {
+      return "NonOverlapping[" + subtrahend.toString() + "]/";
+    }
+  }
+
+  @Override
+  public Weight createWeight(IndexSearcher searcher) throws IOException {
+    return new BrouwerianWeight(innerQuery.createWeight(searcher),
+                                subtrahend.createWeight(searcher), searcher);
+  }
+
+  class BrouwerianWeight extends ScorerFilterWeight {
+
+    private final Weight subtrahendWeight;
+
+    public BrouwerianWeight(Weight minuendWeight, Weight subtrahendWeight, IndexSearcher searcher)
+        throws IOException {
+      super(minuendWeight, searcher);
+      this.subtrahendWeight = subtrahendWeight;
+    }
+
+    @Override
+    public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder, boolean topScorer,
+                         PostingFeatures flags, Bits acceptDocs) throws IOException {
+      return new BrouwerianScorer(innerWeight.scorer(context, scoreDocsInOrder, topScorer, flags, acceptDocs),
+                                  subtrahendWeight.scorer(context, scoreDocsInOrder, topScorer, flags, acceptDocs),
+                                  similarity.simScorer(stats, context));
+    }
+  }
+
+  static class BrouwerianScorer extends PositionFilteredScorer {
+
+    private final Scorer subtrahend;
+    private Interval subtInterval = new Interval();
+    private int subtPosition = -1;
+
+    BrouwerianScorer(Scorer minuend, Scorer subtrahend, Similarity.SimScorer simScorer) {
+      super(minuend, simScorer);
+      this.subtrahend = subtrahend;
+    }
+
+    @Override
+    protected void reset(int doc) throws IOException {
+      super.reset(doc);
+      if (this.subtrahend == null || this.subtrahend.advance(doc) != doc)
+        subtPosition = NO_MORE_POSITIONS;
+      else
+        subtPosition = -1;
+      this.subtInterval.reset();
+    }
+
+    @Override
+    protected int doNextPosition() throws IOException {
+      if (subtPosition == NO_MORE_POSITIONS) {
+        int pos = child.nextPosition();
+        if (pos != NO_MORE_POSITIONS)
+          current.update(child);
+        return pos;
+      }
+      while (child.nextPosition() != NO_MORE_POSITIONS) {
+        current.update(child);
+        while (subtInterval.lessThanExclusive(current) &&
+                  (subtPosition = subtrahend.nextPosition()) != NO_MORE_POSITIONS) {
+          subtInterval.update(subtrahend);
+        }
+        if (subtPosition == NO_MORE_POSITIONS || !current.overlaps(subtInterval))
+          return current.begin;
+      }
+      return NO_MORE_POSITIONS;
+    }
+  }
+
+  @Override
+  public int hashCode() {
+    final int prime = 31;
+    int result = super.hashCode();
+    result = prime * result + ((innerQuery == null) ? 0 : innerQuery.hashCode());
+    result = prime * result
+        + ((subtrahend == null) ? 0 : subtrahend.hashCode());
+    return result;
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (this == obj) return true;
+    if (!super.equals(obj)) return false;
+    if (getClass() != obj.getClass()) return false;
+    NonOverlappingQuery other = (NonOverlappingQuery) obj;
+    if (innerQuery == null) {
+      if (other.innerQuery != null) return false;
+    } else if (!innerQuery.equals(other.innerQuery)) return false;
+    if (subtrahend == null) {
+      if (other.subtrahend != null) return false;
+    } else if (!subtrahend.equals(other.subtrahend)) return false;
+    return true;
+  }
+
+}
\ No newline at end of file
diff --git a/lucene/core/src/java/org/apache/lucene/search/posfilter/OrderedNearQuery.java b/lucene/core/src/java/org/apache/lucene/search/posfilter/OrderedNearQuery.java
new file mode 100644
index 0000000..27d3348
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/search/posfilter/OrderedNearQuery.java
@@ -0,0 +1,139 @@
+package org.apache.lucene.search.posfilter;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.similarities.Similarity;
+
+import java.io.IOException;
+
+/**
+ * A query that matches if a set of subqueries also match, and are within
+ * a given distance of each other within the document.  The subqueries
+ * must appear in the document in order.
+ *
+ * N.B. Positions must be included in the index for this query to work
+ *
+ * Implements the AND< operator as defined in <a href=
+ * "http://vigna.dsi.unimi.it/ftp/papers/EfficientAlgorithmsMinimalIntervalSemantics"
+ * >"Efficient Optimally Lazy Algorithms for Minimal-Interval Semantics"</a>
+ *
+ * @lucene.experimental
+ */
+
+public class OrderedNearQuery extends PositionFilterQuery {
+
+  /**
+   * Constructs an OrderedNearQuery
+   * @param slop the maximum distance between the subquery matches
+   * @param subqueries the subqueries to match.
+   */
+  public OrderedNearQuery(int slop, Query... subqueries) {
+    super(buildBooleanQuery(subqueries), new OrderedNearScorerFactory(slop));
+  }
+
+  private static class OrderedNearScorerFactory implements ScorerFilterFactory {
+
+    private final int slop;
+
+    public OrderedNearScorerFactory(int slop) {
+      this.slop = slop;
+    }
+
+    @Override
+    public Scorer scorer(Scorer filteredScorer, Similarity.SimScorer simScorer) {
+      return new WithinFilteredScorer(new OrderedNearScorer(filteredScorer, simScorer), slop, simScorer);
+    }
+
+    @Override
+    public String getName() {
+      return "OrderedNear/" + slop;
+    }
+  }
+
+  private static class OrderedNearScorer extends PositionFilteredScorer {
+
+    private final int lastiter;
+
+    private int index = 1;
+    private Interval[] intervals;
+
+    public OrderedNearScorer(Scorer filteredScorer, Similarity.SimScorer simScorer) {
+      super(filteredScorer, simScorer);
+      intervals = new Interval[subScorers.length];
+      for (int i = 0; i < subScorers.length; i++) {
+        intervals[i] = new Interval();
+      }
+      lastiter = intervals.length - 1;
+    }
+
+    @Override
+    public int freq() throws IOException {
+      return 1; // nocommit
+    }
+
+    @Override
+    protected void reset(int doc) throws IOException {
+      for (int i = 0; i < subScorers.length; i++) {
+        assert subScorers[i].docID() == doc;
+        intervals[i].update(Interval.INFINITE_INTERVAL);
+      }
+      if (subScorers[0].nextPosition() == NO_MORE_POSITIONS)
+        intervals[0].setMaximum();
+      else
+        intervals[0].update(subScorers[0]);
+      index = 1;
+    }
+
+    @Override
+    protected int doNextPosition() throws IOException {
+      if (intervals[0].begin == NO_MORE_POSITIONS)
+        return NO_MORE_POSITIONS;
+      current.setMaximum();
+      int b = Integer.MAX_VALUE;
+      while (true) {
+        while (true) {
+          final Interval previous = intervals[index - 1];
+          if (previous.end >= b) {
+            return current.begin;
+          }
+          if (index == intervals.length || intervals[index].begin > previous.end)
+            break;
+          Interval scratch = intervals[index];
+          do {
+            if (scratch.end >= b || subScorers[index].nextPosition() == NO_MORE_POSITIONS)
+              return current.begin;
+            intervals[index].update(subScorers[index]);
+            scratch = intervals[index];
+          } while (scratch.begin <= previous.end);
+          index++;
+        }
+        current.update(intervals[0], intervals[lastiter]);
+        matchDistance = (intervals[lastiter].begin - lastiter) - intervals[0].end;
+        b = intervals[lastiter].begin;
+        index = 1;
+        if (subScorers[0].nextPosition() == NO_MORE_POSITIONS) {
+          intervals[0].setMaximum();
+          return current.begin;
+        }
+        intervals[0].update(subScorers[0]);
+      }
+    }
+  }
+}
diff --git a/lucene/core/src/java/org/apache/lucene/search/posfilter/PartiallyOrderedNearScorer.java b/lucene/core/src/java/org/apache/lucene/search/posfilter/PartiallyOrderedNearScorer.java
new file mode 100644
index 0000000..bfec3c0
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/search/posfilter/PartiallyOrderedNearScorer.java
@@ -0,0 +1,261 @@
+package org.apache.lucene.search.posfilter;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.search.PositionQueue;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.similarities.Similarity;
+import org.apache.lucene.util.IntroSorter;
+
+import java.io.IOException;
+
+
+public class PartiallyOrderedNearScorer extends PositionFilteredScorer {
+
+  private final SloppySpanningPositionQueue posQueue;
+  private final int allowedSlop;
+
+  private int previousEnd;
+
+  public static int MAX_SLOP = Integer.MAX_VALUE;
+
+  public PartiallyOrderedNearScorer(Scorer filteredScorer, int allowedSlop, Similarity.SimScorer simScorer) {
+    super(filteredScorer, simScorer);
+    this.posQueue = new SloppySpanningPositionQueue(subScorers);
+    this.allowedSlop = allowedSlop;
+  }
+
+  @Override
+  protected int doNextPosition() throws IOException {
+    int currentSlop = MAX_SLOP;
+    while (posQueue.isFull() && posQueue.span.begin == current.begin) {
+      posQueue.nextPosition();
+    }
+    if (!posQueue.isFull())
+      return NO_MORE_POSITIONS;
+    while (true) {
+      do {
+        posQueue.updateCurrent(current);
+        if (current.begin > previousEnd) {
+          currentSlop = posQueue.calculateSlop(allowedSlop);
+          System.out.println("Calculated slop on " + posQueue.toString() + ": " + currentSlop);
+          if (currentSlop <= allowedSlop) {
+            previousEnd = current.end;
+            return current.begin;
+          }
+        }
+        posQueue.nextPosition();
+      } while (posQueue.isFull() && current.end == posQueue.span.end);
+      if (current.begin <= previousEnd)
+        continue;
+      if (currentSlop <= allowedSlop) {
+        previousEnd = current.end;
+        return current.begin;
+      }
+      if (!posQueue.isFull())
+        return NO_MORE_POSITIONS;
+    }
+  }
+
+  @Override
+  protected void reset(int doc) throws IOException {
+    super.reset(doc);
+    current.reset();
+    posQueue.advanceTo(doc);
+    previousEnd = -1;
+  }
+
+  private static class IntervalRef {
+
+    public Interval interval = new Interval();
+    public int ord;
+
+    public IntervalRef() {}
+
+    public void update(IntervalRef other) {
+      this.ord = other.ord;
+      this.interval.update(other.interval);
+    }
+
+    public void update(Interval interval, int ord) {
+      this.ord = ord;
+      this.interval.update(interval);
+    }
+  }
+
+  private static class SloppySpanningPositionQueue extends PositionQueue {
+
+    Interval span = new Interval();
+    final Interval[] subIntervals;
+    final IntervalRef[] sortedIntervals;
+    int scorerCount;
+
+    public SloppySpanningPositionQueue(Scorer[] subScorers) {
+      super(subScorers);
+      scorerCount = subScorers.length;
+      subIntervals = new Interval[subScorers.length];
+      sortedIntervals = new IntervalRef[subScorers.length];
+      for (int i = 0; i < subScorers.length; i++) {
+        subIntervals[i] = new Interval();
+        sortedIntervals[i] = new IntervalRef();
+      }
+    }
+
+    public boolean isFull() {
+      return queuesize == scorerCount;
+    }
+
+    public void updateCurrent(Interval current) {
+      final Interval top = this.top().interval;
+      current.update(top, span);
+    }
+
+    private void updateRightExtreme(Interval newRight) {
+      if (span.end <= newRight.end) {
+        span.update(span, newRight);
+      }
+    }
+
+    protected void updateInternalIntervals() {
+      DocsEnumRef deRef = top();
+      subIntervals[deRef.ord].update(deRef.interval);
+      //subIntervals[deRef.ord].ord = deRef.ord;
+      updateRightExtreme(deRef.interval);
+    }
+
+    @Override
+    public int nextPosition() throws IOException {
+      int position;
+      if ((position = super.nextPosition()) == DocsEnum.NO_MORE_POSITIONS) {
+        return DocsEnum.NO_MORE_POSITIONS;
+      }
+      span.update(top().interval, span);
+      System.out.println("SSPQ: " + span.toString());
+      return position;
+    }
+
+    @Override
+    protected void init() throws IOException {
+      super.init();
+      DocsEnumRef deRef;
+      for (Object heapRef : getHeapArray()) {
+        if (heapRef != null) {
+          deRef = (DocsEnumRef) heapRef;
+          subIntervals[deRef.ord].update(deRef.interval);
+          //subIntervals[deRef.ord].ord = deRef.ord;
+          updateRightExtreme(deRef.interval);
+        }
+      }
+    }
+
+    @Override
+    public void advanceTo(int doc) {
+      super.advanceTo(doc);
+      span.reset();
+    }
+
+    @Override
+    protected boolean lessThan(DocsEnumRef left, DocsEnumRef right) {
+      final Interval a = left.interval;
+      final Interval b = right.interval;
+      return a.begin < b.begin || (a.begin == b.begin && a.end > b.end);
+    }
+
+    @Override
+    public String toString() {
+      return top().interval.toString();
+    }
+
+    // nocommit, is this algorithm ok or is it going to be horribly inefficient?
+    // We sort the subintervals by their start positions.  If a subinterval is
+    // out of position, we calculate it's slop contribution by counting the
+    // number of subsequent subintervals with lower ords.  Gaps between subintervals
+    // are also added.  If the running total exceeds a provided max allowed slop,
+    // then we shortcut the calculation and return MAX_SLOP.
+    // If duplicates are detected by the subinterval sorter, MAX_SLOP is also returned
+    public int calculateSlop(int maxAllowedSlop) {
+      boolean swaps = false;
+      int slop = 0;
+      if (sortSubIntervals())
+        return MAX_SLOP;
+      for (int i = 0; i < sortedIntervals.length; i++) {
+        if (swaps || sortedIntervals[i].ord != i) {
+          swaps = true;
+          for (int j = i + 1; j < sortedIntervals.length; j++) {
+            if (sortedIntervals[j].ord < sortedIntervals[i].ord)
+              slop++;
+          }
+        }
+        if (i > 0)
+          slop += (sortedIntervals[i].interval.begin - sortedIntervals[i - 1].interval.end) - 1;
+        if (slop > maxAllowedSlop)
+          return MAX_SLOP;
+      }
+      return slop;
+    }
+
+    private boolean sortSubIntervals() {
+
+      for (int i = 0; i < subIntervals.length; i++) {
+        sortedIntervals[i].update(subIntervals[i], i);
+      }
+
+      sorter.duplicates = false;
+      sorter.sort(0, sortedIntervals.length - 1);
+      return sorter.duplicates;
+    }
+
+    DuplicateCheckingSorterTemplate sorter = new DuplicateCheckingSorterTemplate();
+
+    class DuplicateCheckingSorterTemplate extends IntroSorter {
+
+        int pivot;
+        boolean duplicates;
+
+        @Override
+        protected void swap(int i, int j) {
+          IntervalRef temp = new IntervalRef();
+          temp.update(sortedIntervals[i]);
+          sortedIntervals[i].update(sortedIntervals[j]);
+          sortedIntervals[j].update(temp);
+        }
+
+        @Override
+        protected int compare(int i, int j) {
+          //System.out.println("Comparing " + sortedIntervals[i].interval + " with " + sortedIntervals[j].interval);
+          if (sortedIntervals[i].interval.begin == sortedIntervals[j].interval.begin &&
+              sortedIntervals[i].interval.end == sortedIntervals[j].interval.end)
+            duplicates = true;
+          return Long.signum(sortedIntervals[i].interval.begin - sortedIntervals[j].interval.begin);
+        }
+
+        @Override
+        protected void setPivot(int i) {
+          this.pivot = sortedIntervals[i].interval.begin;
+        }
+
+        @Override
+        protected int comparePivot(int j) {
+          return Long.signum(pivot - sortedIntervals[j].interval.begin);
+        }
+
+
+    }
+  }
+}
diff --git a/lucene/core/src/java/org/apache/lucene/search/posfilter/PhraseQuery2.java b/lucene/core/src/java/org/apache/lucene/search/posfilter/PhraseQuery2.java
new file mode 100644
index 0000000..a583c58
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/search/posfilter/PhraseQuery2.java
@@ -0,0 +1,84 @@
+package org.apache.lucene.search.posfilter;
+
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.BooleanClause;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.similarities.Similarity;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class PhraseQuery2 extends PositionFilterQuery {
+
+  private final BooleanQuery innerBQ;
+  private final int slop;
+
+  public PhraseQuery2(int slop) {
+    super(new BooleanQuery(), new ExactPhraseScorerFactory(slop));
+    this.innerBQ = (BooleanQuery) innerQuery;
+    this.slop = slop;
+  }
+
+  public PhraseQuery2() {
+    this(0);
+  }
+
+  public int getSlop() {
+    return slop;
+  }
+
+  public void add(Term term) {
+    innerBQ.add(new TermQuery(term), BooleanClause.Occur.MUST);
+  }
+
+  public void addMultiTerm(Term... terms) {
+    if (terms.length == 1) {
+      add(terms[0]);
+      return;
+    }
+    BooleanQuery disj = new BooleanQuery();
+    for (Term term : terms) {
+      disj.add(new TermQuery(term), BooleanClause.Occur.SHOULD);
+    }
+    innerBQ.add(disj, BooleanClause.Occur.MUST);
+  }
+
+  private static class ExactPhraseScorerFactory implements ScorerFilterFactory {
+
+    private final int slop;
+
+    ExactPhraseScorerFactory(int slop) {
+      this.slop = slop;
+    }
+
+    @Override
+    public Scorer scorer(Scorer filteredScorer, Similarity.SimScorer simScorer) {
+      if (slop == 0)
+        return new BlockPhraseScorer(filteredScorer, simScorer);
+      else
+        return new PartiallyOrderedNearScorer(filteredScorer, slop, simScorer);
+    }
+
+    @Override
+    public String getName() {
+      return slop == 0 ? "ExactPhrase" : "SloppyPhrase/" + slop;
+    }
+  }
+
+}
diff --git a/lucene/core/src/java/org/apache/lucene/search/posfilter/PositionFilterQuery.java b/lucene/core/src/java/org/apache/lucene/search/posfilter/PositionFilterQuery.java
new file mode 100644
index 0000000..a442635
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/search/posfilter/PositionFilterQuery.java
@@ -0,0 +1,169 @@
+package org.apache.lucene.search.posfilter;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.TermContext;
+import org.apache.lucene.search.BooleanClause;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.ComplexExplanation;
+import org.apache.lucene.search.Explanation;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.TermStatistics;
+import org.apache.lucene.search.Weight;
+import org.apache.lucene.search.similarities.Similarity;
+import org.apache.lucene.util.Bits;
+
+import java.io.IOException;
+import java.util.Set;
+import java.util.TreeSet;
+
+public class PositionFilterQuery extends Query {
+
+  protected final Query innerQuery;
+  protected final ScorerFilterFactory scorerFilterFactory;
+
+  public PositionFilterQuery(Query innerQuery, ScorerFilterFactory scorerFilterFactory) {
+    this.innerQuery = innerQuery;
+    this.scorerFilterFactory = scorerFilterFactory;
+  }
+
+  protected static BooleanQuery buildBooleanQuery(Query... queries) {
+    BooleanQuery bq = new BooleanQuery();
+    for (Query q : queries) {
+      bq.add(q, BooleanClause.Occur.MUST);
+    }
+    return bq;
+  }
+
+  @Override
+  public void extractTerms(Set<Term> terms) {
+    innerQuery.extractTerms(terms);
+  }
+
+  @Override
+  public Query rewrite(IndexReader reader) throws IOException {
+    Query rewritten =  innerQuery.rewrite(reader);
+    if (rewritten != innerQuery) {
+      return new PositionFilterQuery(rewritten, scorerFilterFactory);
+    }
+    return this;
+  }
+
+  @Override
+  public Weight createWeight(IndexSearcher searcher) throws IOException {
+    return new ScorerFilterWeight(innerQuery.createWeight(searcher), searcher);
+  }
+
+  @Override
+  public String toString(String field) {
+    return scorerFilterFactory.getName() + "[" + innerQuery.toString() + "]";
+  }
+
+  public class ScorerFilterWeight extends Weight {
+
+    protected final Weight innerWeight;
+    protected final Similarity similarity;
+    protected final Similarity.SimWeight stats;
+
+    public ScorerFilterWeight(Weight innerWeight, IndexSearcher searcher) throws IOException {
+      this.innerWeight = innerWeight;
+      this.similarity = searcher.getSimilarity();
+      this.stats = getSimWeight(innerWeight.getQuery(), searcher);
+    }
+
+    private Similarity.SimWeight getSimWeight(Query query, IndexSearcher searcher)  throws IOException {
+      TreeSet<Term> terms = new TreeSet<Term>();
+      query.extractTerms(terms);
+      if (terms.size() == 0)
+        return null;
+      int i = 0;
+      TermStatistics[] termStats = new TermStatistics[terms.size()];
+      for (Term term : terms) {
+        TermContext state = TermContext.build(searcher.getTopReaderContext(), term);
+        termStats[i] = searcher.termStatistics(term, state);
+        i++;
+      }
+      final String field = terms.first().field(); // nocommit - should we be checking all filtered terms
+      // are on the same field?
+      return similarity.computeWeight(query.getBoost(), searcher.collectionStatistics(field), termStats);
+    }
+
+    @Override
+    public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
+      Scorer scorer = scorer(context, true, false, PostingFeatures.POSITIONS,
+          context.reader().getLiveDocs());
+      if (scorer != null) {
+        int newDoc = scorer.advance(doc);
+        if (newDoc == doc) {
+          float freq = scorer.freq();
+          Similarity.SimScorer docScorer = similarity.simScorer(stats, context);
+          ComplexExplanation result = new ComplexExplanation();
+          result.setDescription("weight("+getQuery()+" in "+doc+") [" + similarity.getClass().getSimpleName() + "], result of:");
+          Explanation scoreExplanation = docScorer.explain(doc, new Explanation(freq, "phraseFreq=" + freq));
+          result.addDetail(scoreExplanation);
+          result.setValue(scoreExplanation.getValue());
+          result.setMatch(true);
+          return result;
+        }
+      }
+      return new ComplexExplanation(false, 0.0f,
+          "No matching term within position filter");
+    }
+
+    @Override
+    public Query getQuery() {
+      return PositionFilterQuery.this;
+    }
+
+    @Override
+    public float getValueForNormalization() throws IOException {
+      return stats == null ? 1.0f : stats.getValueForNormalization();
+    }
+
+    @Override
+    public void normalize(float norm, float topLevelBoost) {
+      if (stats != null)
+        stats.normalize(norm, topLevelBoost);
+    }
+
+    @Override
+    public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder, boolean topScorer,
+                         PostingFeatures flags, Bits acceptDocs) throws IOException {
+      PostingFeatures posFlags =
+          (flags == PostingFeatures.DOCS_ONLY || flags == PostingFeatures.DOCS_AND_FREQS) ?
+              PostingFeatures.POSITIONS : flags;
+      System.out.println("Scorer flags: " + flags);
+      Scorer filteredScorer = innerWeight.scorer(context, scoreDocsInOrder, topScorer, posFlags, acceptDocs);
+      return filteredScorer == null ? null
+                : scorerFilterFactory.scorer(filteredScorer, similarity.simScorer(stats, context));
+    }
+  }
+
+  public static interface ScorerFilterFactory {
+
+    public Scorer scorer(Scorer filteredScorer, Similarity.SimScorer simScorer);
+
+    public String getName();
+
+  }
+}
diff --git a/lucene/core/src/java/org/apache/lucene/search/posfilter/PositionFilteredScorer.java b/lucene/core/src/java/org/apache/lucene/search/posfilter/PositionFilteredScorer.java
new file mode 100644
index 0000000..8dfbb43
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/search/posfilter/PositionFilteredScorer.java
@@ -0,0 +1,137 @@
+package org.apache.lucene.search.posfilter;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.similarities.Similarity;
+
+import java.io.IOException;
+
+public abstract class PositionFilteredScorer extends Scorer {
+
+  protected final Scorer[] subScorers;
+  protected final Scorer child;
+  protected final Interval current = new Interval();
+  protected final Similarity.SimScorer simScorer;
+  protected int matchDistance;
+
+  private boolean buffered;
+
+  public PositionFilteredScorer(Scorer filteredScorer, Similarity.SimScorer simScorer) {
+    super(filteredScorer.getWeight());
+    this.simScorer = simScorer;
+    child = filteredScorer;
+    subScorers = new Scorer[filteredScorer.getChildren().size()];
+    int i = 0;
+    for (ChildScorer subScorer : filteredScorer.getChildren()) {
+      subScorers[i++] = subScorer.child;
+    }
+  }
+
+  @Override
+  public float score() throws IOException {
+    return this.simScorer.score(docID(), freq());
+  }
+
+  @Override
+  public int docID() {
+    return child.docID();
+  }
+
+  @Override
+  public int freq() throws IOException {
+    int freq = 0;
+    while (nextPosition() != NO_MORE_POSITIONS) {
+      freq++;
+    }
+    return freq;
+  }
+
+  @Override
+  public int nextDoc() throws IOException {
+    while (child.nextDoc() != NO_MORE_DOCS) {
+      reset(child.docID());
+      if (nextPosition() != NO_MORE_POSITIONS) {
+        buffered = true;
+        return child.docID();
+      }
+    }
+    return NO_MORE_DOCS;
+  }
+
+  @Override
+  public int advance(int target) throws IOException {
+    if (child.advance(target) == NO_MORE_DOCS)
+      return NO_MORE_DOCS;
+    do {
+      reset(child.docID());
+      if (nextPosition() != NO_MORE_POSITIONS) {
+        buffered = true;
+        return child.docID();
+      }
+    } while (child.nextDoc() != NO_MORE_DOCS);
+    return NO_MORE_DOCS;
+  }
+
+  @Override
+  public int nextPosition() throws IOException {
+    if (buffered) {
+      //System.out.println(this.hashCode() + ": returning buffered nextPos");
+      buffered = false;
+      return current.begin;
+    }
+    //System.out.println(this.hashCode() + ": returning unbuffered nextPos");
+    return doNextPosition();
+  }
+
+  protected abstract int doNextPosition() throws IOException;
+
+  protected void reset(int doc) throws IOException {
+    buffered = false;
+  };
+
+  public int getMatchDistance() {
+    return matchDistance;
+  }
+
+  @Override
+  public int startPosition() throws IOException {
+    return current.begin;
+  }
+
+  @Override
+  public int endPosition() throws IOException {
+    return current.end;
+  }
+
+  @Override
+  public int startOffset() throws IOException {
+    return current.offsetBegin;
+  }
+
+  @Override
+  public int endOffset() throws IOException {
+    return current.offsetEnd;
+  }
+
+  @Override
+  public long cost() {
+    return child.cost();
+  }
+// nocommit Payloads - need to add these to Interval?
+}
diff --git a/lucene/core/src/java/org/apache/lucene/search/posfilter/RangeFilterQuery.java b/lucene/core/src/java/org/apache/lucene/search/posfilter/RangeFilterQuery.java
new file mode 100644
index 0000000..63b0d1d
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/search/posfilter/RangeFilterQuery.java
@@ -0,0 +1,82 @@
+package org.apache.lucene.search.posfilter;
+
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.similarities.Similarity;
+
+import java.io.IOException;
+
+/**
+ * Copyright (c) 2012 Lemur Consulting Ltd.
+ * <p/>
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class RangeFilterQuery extends PositionFilterQuery {
+
+  public RangeFilterQuery(int start, int end, Query innerQuery) {
+    super(innerQuery, new RangeFilterScorerFactory(start, end));
+  }
+
+  public RangeFilterQuery(int end, Query innerQuery) {
+    this(0, end, innerQuery);
+  }
+
+  private static class RangeFilterScorerFactory implements ScorerFilterFactory {
+
+    private final int start;
+    private final int end;
+
+    public RangeFilterScorerFactory(int start, int end) {
+      this.start = start;
+      this.end = end;
+    }
+
+    @Override
+    public Scorer scorer(Scorer filteredScorer, Similarity.SimScorer simScorer) {
+      return new RangeFilterScorer(start, end, filteredScorer, simScorer);
+    }
+
+    @Override
+    public String getName() {
+      return "RangeFilter(" + start + "," + end + ")";
+    }
+  }
+
+  private static class RangeFilterScorer extends PositionFilteredScorer {
+
+    private final int start;
+    private final int end;
+
+    public RangeFilterScorer(int start, int end, Scorer filteredScorer, Similarity.SimScorer simScorer) {
+      super(filteredScorer, simScorer);
+      this.start = start;
+      this.end = end;
+    }
+
+    @Override
+    protected int doNextPosition() throws IOException {
+      int position;
+      while ((position = child.nextPosition()) != NO_MORE_POSITIONS) {
+        if (position > end)
+          return NO_MORE_POSITIONS;
+        if (position >= start) {
+          current.update(child);
+          return position;
+        }
+      }
+      return NO_MORE_POSITIONS;
+    }
+  }
+
+}
diff --git a/lucene/core/src/java/org/apache/lucene/search/posfilter/UnorderedNearQuery.java b/lucene/core/src/java/org/apache/lucene/search/posfilter/UnorderedNearQuery.java
new file mode 100644
index 0000000..e6944bb
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/search/posfilter/UnorderedNearQuery.java
@@ -0,0 +1,187 @@
+package org.apache.lucene.search.posfilter;
+
+/**
+ * Copyright (c) 2012 Lemur Consulting Ltd.
+ * <p/>
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.search.PositionQueue;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.similarities.Similarity;
+
+import java.io.IOException;
+
+/**
+ * A query that matches if a set of subqueries also match, and are within
+ * a given distance of each other within the document.  The subqueries
+ * may appear in the document in any order.
+ *
+ * N.B. Positions must be included in the index for this query to work
+ *
+ * Implements the LOWPASS<sub>k</sub> operator as defined in <a href=
+ * "http://vigna.dsi.unimi.it/ftp/papers/EfficientAlgorithmsMinimalIntervalSemantics"
+ * >"Efficient Optimally Lazy Algorithms for Minimal-Interval Semantics"</a>
+ *
+ * @lucene.experimental
+ */
+
+public class UnorderedNearQuery extends PositionFilterQuery {
+
+  /**
+   * Constructs an OrderedNearQuery
+   * @param slop the maximum distance between the subquery matches
+   * @param subqueries the subqueries to match.
+   */
+  public UnorderedNearQuery(int slop, Query... subqueries) {
+    super(buildBooleanQuery(subqueries), new UnorderedNearScorerFactory(slop));
+  }
+
+  private static class UnorderedNearScorerFactory implements ScorerFilterFactory {
+
+    private final int slop;
+
+    UnorderedNearScorerFactory(int slop) {
+      this.slop = slop;
+    }
+
+    @Override
+    public Scorer scorer(Scorer filteredScorer, Similarity.SimScorer simScorer) {
+      return new WithinFilteredScorer(new UnorderedNearScorer(filteredScorer, simScorer), slop, simScorer);
+    }
+
+    @Override
+    public String getName() {
+      return "UnorderedNear/" + slop;
+    }
+  }
+
+  private static class UnorderedNearScorer extends PositionFilteredScorer {
+
+    SpanningPositionQueue posQueue;
+
+    public UnorderedNearScorer(Scorer filteredScorer, Similarity.SimScorer simScorer) {
+      super(filteredScorer, simScorer);
+      posQueue = new SpanningPositionQueue(subScorers);
+    }
+
+    @Override
+    protected int doNextPosition() throws IOException {
+      while (posQueue.isFull() && posQueue.span.begin == current.begin) {
+        posQueue.nextPosition();
+      }
+      if (!posQueue.isFull())
+        return NO_MORE_POSITIONS;
+      do {
+        //current.update(posQueue.top().interval, posQueue.span);
+        posQueue.updateCurrent(current);
+        if (current.equals(posQueue.top().interval))
+          return current.begin;
+        matchDistance = posQueue.getMatchDistance();
+        posQueue.nextPosition();
+      } while (posQueue.isFull() && current.end == posQueue.span.end);
+      return current.begin;
+    }
+
+    @Override
+    protected void reset(int doc) throws IOException {
+      super.reset(doc);
+      current.reset();
+      posQueue.advanceTo(doc);
+    }
+
+  }
+
+  private static class SpanningPositionQueue extends PositionQueue {
+
+    Interval span = new Interval();
+    int scorerCount;
+    int firstIntervalEnd;
+    int lastIntervalBegin;
+
+    public SpanningPositionQueue(Scorer[] subScorers) {
+      super(subScorers);
+      scorerCount = subScorers.length;
+    }
+
+    public int getMatchDistance() {
+      return lastIntervalBegin - firstIntervalEnd - scorerCount + 1;
+    }
+
+    public boolean isFull() {
+      return queuesize == scorerCount;
+    }
+
+    public void updateCurrent(Interval current) {
+      final Interval top = this.top().interval;
+      current.update(top, span);
+      this.firstIntervalEnd = top.end;
+    }
+
+    private void updateRightExtreme(Interval newRight) {
+      if (span.end <= newRight.end) {
+        span.update(span, newRight);
+        this.lastIntervalBegin = newRight.begin;
+      }
+    }
+
+    protected void updateInternalIntervals() {
+      updateRightExtreme(top().interval);
+    }
+
+    @Override
+    public int nextPosition() throws IOException {
+      int position;
+      if ((position = super.nextPosition()) == DocsEnum.NO_MORE_POSITIONS) {
+        return DocsEnum.NO_MORE_POSITIONS;
+      }
+      span.update(top().interval, span);
+      return position;
+    }
+
+    @Override
+    protected void init() throws IOException {
+      super.init();
+      for (Object docsEnumRef : getHeapArray()) {
+        if (docsEnumRef != null) {
+          final Interval i = ((DocsEnumRef) docsEnumRef).interval;
+          updateRightExtreme(i);
+        }
+      }
+    }
+
+    @Override
+    public void advanceTo(int doc) {
+      super.advanceTo(doc);
+      span.reset();
+      firstIntervalEnd = lastIntervalBegin = span.begin;
+    }
+
+    @Override
+    protected boolean lessThan(DocsEnumRef left, DocsEnumRef right) {
+      final Interval a = left.interval;
+      final Interval b = right.interval;
+      return a.begin < b.begin || (a.begin == b.begin && a.end > b.end);
+    }
+
+    @Override
+    public String toString() {
+      return top().interval.toString();
+    }
+  }
+
+
+}
+
diff --git a/lucene/core/src/java/org/apache/lucene/search/posfilter/WithinFilteredScorer.java b/lucene/core/src/java/org/apache/lucene/search/posfilter/WithinFilteredScorer.java
new file mode 100644
index 0000000..6b85f8c
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/search/posfilter/WithinFilteredScorer.java
@@ -0,0 +1,47 @@
+package org.apache.lucene.search.posfilter;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.search.similarities.Similarity;
+
+import java.io.IOException;
+
+public class WithinFilteredScorer extends PositionFilteredScorer {
+
+  private final int slop;
+  private final PositionFilteredScorer wrappedScorer;
+
+  public WithinFilteredScorer(PositionFilteredScorer wrappedScorer, int slop, Similarity.SimScorer simScorer) {
+    super(wrappedScorer, simScorer);
+    this.slop = slop;
+    this.wrappedScorer = wrappedScorer;
+  }
+
+  @Override
+  protected int doNextPosition() throws IOException {
+    int position;
+    while ((position = wrappedScorer.nextPosition()) != NO_MORE_POSITIONS) {
+      if (wrappedScorer.getMatchDistance() <= slop) {
+        current.update(wrappedScorer);
+        return position;
+      }
+    }
+    return NO_MORE_POSITIONS;
+  }
+
+}
diff --git a/lucene/core/src/java/org/apache/lucene/search/similarities/Similarity.java b/lucene/core/src/java/org/apache/lucene/search/similarities/Similarity.java
index b4ff8bb..0703287 100644
--- a/lucene/core/src/java/org/apache/lucene/search/similarities/Similarity.java
+++ b/lucene/core/src/java/org/apache/lucene/search/similarities/Similarity.java
@@ -17,9 +17,7 @@
  * limitations under the License.
  */
 
-import java.io.IOException;
-
-import org.apache.lucene.index.AtomicReader; // javadoc
+import org.apache.lucene.index.AtomicReader;
 import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.FieldInvertState;
 import org.apache.lucene.search.BooleanQuery;
@@ -29,9 +27,11 @@
 import org.apache.lucene.search.PhraseQuery;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.search.TermStatistics;
-import org.apache.lucene.search.spans.SpanQuery; // javadoc
+import org.apache.lucene.search.spans.SpanQuery;
 import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.SmallFloat; // javadoc
+import org.apache.lucene.util.SmallFloat;
+
+import java.io.IOException;
 
 /** 
  * Similarity defines the components of Lucene scoring.
diff --git a/lucene/core/src/java/org/apache/lucene/search/spans/SpanScorer.java b/lucene/core/src/java/org/apache/lucene/search/spans/SpanScorer.java
index 74a098d..5fce32d 100644
--- a/lucene/core/src/java/org/apache/lucene/search/spans/SpanScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/spans/SpanScorer.java
@@ -21,6 +21,7 @@
 
 import org.apache.lucene.search.Weight;
 import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.intervals.IntervalIterator;
 import org.apache.lucene.search.similarities.Similarity;
 
 /**
@@ -103,9 +104,15 @@
   public float sloppyFreq() throws IOException {
     return freq;
   }
-  
+
+  @Override
+  public IntervalIterator intervals(boolean collectIntervals) throws IOException {
+    return null;
+  }
+
   @Override
   public long cost() {
     return spans.cost();
   }
+
 }
diff --git a/lucene/core/src/java/org/apache/lucene/search/spans/SpanTermQuery.java b/lucene/core/src/java/org/apache/lucene/search/spans/SpanTermQuery.java
index f0a27c4..385f62b 100644
--- a/lucene/core/src/java/org/apache/lucene/search/spans/SpanTermQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/spans/SpanTermQuery.java
@@ -20,7 +20,7 @@
 import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.Fields;
 import org.apache.lucene.index.Term;
-import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.index.DocsEnum;
 import org.apache.lucene.index.TermContext;
 import org.apache.lucene.index.TermState;
 import org.apache.lucene.index.Terms;
@@ -120,7 +120,7 @@
     final TermsEnum termsEnum = context.reader().terms(term.field()).iterator(null);
     termsEnum.seekExact(term.bytes(), state);
     
-    final DocsAndPositionsEnum postings = termsEnum.docsAndPositions(acceptDocs, null, DocsAndPositionsEnum.FLAG_PAYLOADS);
+    final DocsEnum postings = termsEnum.docsAndPositions(acceptDocs, null, DocsEnum.FLAG_PAYLOADS);
 
     if (postings != null) {
       return new TermSpans(postings, term);
diff --git a/lucene/core/src/java/org/apache/lucene/search/spans/SpanWeight.java b/lucene/core/src/java/org/apache/lucene/search/spans/SpanWeight.java
index fb73721..f12107e 100644
--- a/lucene/core/src/java/org/apache/lucene/search/spans/SpanWeight.java
+++ b/lucene/core/src/java/org/apache/lucene/search/spans/SpanWeight.java
@@ -82,7 +82,7 @@
 
   @Override
   public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder,
-      boolean topScorer, Bits acceptDocs) throws IOException {
+      boolean topScorer, PostingFeatures flags, Bits acceptDocs) throws IOException {
     if (stats == null) {
       return null;
     } else {
@@ -92,7 +92,7 @@
 
   @Override
   public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
-    SpanScorer scorer = (SpanScorer) scorer(context, true, false, context.reader().getLiveDocs());
+    SpanScorer scorer = (SpanScorer) scorer(context, true, false, PostingFeatures.POSITIONS, context.reader().getLiveDocs());
     if (scorer != null) {
       int newDoc = scorer.advance(doc);
       if (newDoc == doc) {
diff --git a/lucene/core/src/java/org/apache/lucene/search/spans/TermSpans.java b/lucene/core/src/java/org/apache/lucene/search/spans/TermSpans.java
index d4974a5..39f72eb 100644
--- a/lucene/core/src/java/org/apache/lucene/search/spans/TermSpans.java
+++ b/lucene/core/src/java/org/apache/lucene/search/spans/TermSpans.java
@@ -17,7 +17,7 @@
 
 
 import org.apache.lucene.index.Term;
-import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.index.DocsEnum;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.util.BytesRef;
 
@@ -30,7 +30,7 @@
  * Public for extension only
  */
 public class TermSpans extends Spans {
-  protected final DocsAndPositionsEnum postings;
+  protected final DocsEnum postings;
   protected final Term term;
   protected int doc;
   protected int freq;
@@ -38,7 +38,7 @@
   protected int position;
   protected boolean readPayload;
 
-  public TermSpans(DocsAndPositionsEnum postings, Term term) {
+  public TermSpans(DocsEnum postings, Term term) {
     this.postings = postings;
     this.term = term;
     doc = -1;
@@ -132,7 +132,7 @@
             (doc == -1 ? "START" : (doc == Integer.MAX_VALUE) ? "END" : doc + "-" + position);
   }
 
-  public DocsAndPositionsEnum getPostings() {
+  public DocsEnum getPostings() {
     return postings;
   }
 
diff --git a/lucene/core/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java b/lucene/core/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java
index af307bb..c421033 100644
--- a/lucene/core/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java
+++ b/lucene/core/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java
@@ -26,7 +26,7 @@
 import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.MultiFields;
-import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.index.DocsEnum;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.store.Directory;
@@ -72,7 +72,7 @@
     writer.addDocument(doc);
     
     IndexReader reader = writer.getReader();
-    DocsAndPositionsEnum termPositions = MultiFields.getTermPositionsEnum(reader,
+    DocsEnum termPositions = MultiFields.getTermPositionsEnum(reader,
                                                                           MultiFields.getLiveDocs(reader),
                                                                           "preanalyzed",
                                                                           new BytesRef("term1"));
diff --git a/lucene/core/src/test/org/apache/lucene/codecs/lucene41/TestBlockPostingsFormat3.java b/lucene/core/src/test/org/apache/lucene/codecs/lucene41/TestBlockPostingsFormat3.java
index fe683e3..6537936 100644
--- a/lucene/core/src/test/org/apache/lucene/codecs/lucene41/TestBlockPostingsFormat3.java
+++ b/lucene/core/src/test/org/apache/lucene/codecs/lucene41/TestBlockPostingsFormat3.java
@@ -39,7 +39,6 @@
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.index.TermsEnum.SeekStatus;
 import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.index.DocsAndPositionsEnum;
 import org.apache.lucene.index.DocsEnum;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
@@ -282,8 +281,8 @@
   public void assertTermsEnum(TermsEnum leftTermsEnum, TermsEnum rightTermsEnum, boolean deep) throws Exception {
     BytesRef term;
     Bits randomBits = new RandomBits(MAXDOC, random().nextDouble(), random());
-    DocsAndPositionsEnum leftPositions = null;
-    DocsAndPositionsEnum rightPositions = null;
+    DocsEnum leftPositions = null;
+    DocsEnum rightPositions = null;
     DocsEnum leftDocs = null;
     DocsEnum rightDocs = null;
     
@@ -304,30 +303,30 @@
                                 leftPositions = leftTermsEnum.docsAndPositions(randomBits, leftPositions),
                                 rightPositions = rightTermsEnum.docsAndPositions(randomBits, rightPositions));
         // with payloads only
-        assertDocsAndPositionsEnum(leftPositions = leftTermsEnum.docsAndPositions(null, leftPositions, DocsAndPositionsEnum.FLAG_PAYLOADS),
-                                   rightPositions = rightTermsEnum.docsAndPositions(null, rightPositions, DocsAndPositionsEnum.FLAG_PAYLOADS));
-        assertDocsAndPositionsEnum(leftPositions = leftTermsEnum.docsAndPositions(randomBits, leftPositions, DocsAndPositionsEnum.FLAG_PAYLOADS),
-                                   rightPositions = rightTermsEnum.docsAndPositions(randomBits, rightPositions, DocsAndPositionsEnum.FLAG_PAYLOADS));
+        assertDocsAndPositionsEnum(leftPositions = leftTermsEnum.docsAndPositions(null, leftPositions, DocsEnum.FLAG_PAYLOADS),
+                                   rightPositions = rightTermsEnum.docsAndPositions(null, rightPositions, DocsEnum.FLAG_PAYLOADS));
+        assertDocsAndPositionsEnum(leftPositions = leftTermsEnum.docsAndPositions(randomBits, leftPositions, DocsEnum.FLAG_PAYLOADS),
+                                   rightPositions = rightTermsEnum.docsAndPositions(randomBits, rightPositions, DocsEnum.FLAG_PAYLOADS));
 
         assertPositionsSkipping(leftTermsEnum.docFreq(), 
-                                leftPositions = leftTermsEnum.docsAndPositions(null, leftPositions, DocsAndPositionsEnum.FLAG_PAYLOADS),
-                                rightPositions = rightTermsEnum.docsAndPositions(null, rightPositions, DocsAndPositionsEnum.FLAG_PAYLOADS));
+                                leftPositions = leftTermsEnum.docsAndPositions(null, leftPositions, DocsEnum.FLAG_PAYLOADS),
+                                rightPositions = rightTermsEnum.docsAndPositions(null, rightPositions, DocsEnum.FLAG_PAYLOADS));
         assertPositionsSkipping(leftTermsEnum.docFreq(), 
-                                leftPositions = leftTermsEnum.docsAndPositions(randomBits, leftPositions, DocsAndPositionsEnum.FLAG_PAYLOADS),
-                                rightPositions = rightTermsEnum.docsAndPositions(randomBits, rightPositions, DocsAndPositionsEnum.FLAG_PAYLOADS));
+                                leftPositions = leftTermsEnum.docsAndPositions(randomBits, leftPositions, DocsEnum.FLAG_PAYLOADS),
+                                rightPositions = rightTermsEnum.docsAndPositions(randomBits, rightPositions, DocsEnum.FLAG_PAYLOADS));
 
         // with offsets only
-        assertDocsAndPositionsEnum(leftPositions = leftTermsEnum.docsAndPositions(null, leftPositions, DocsAndPositionsEnum.FLAG_OFFSETS),
-                                   rightPositions = rightTermsEnum.docsAndPositions(null, rightPositions, DocsAndPositionsEnum.FLAG_OFFSETS));
-        assertDocsAndPositionsEnum(leftPositions = leftTermsEnum.docsAndPositions(randomBits, leftPositions, DocsAndPositionsEnum.FLAG_OFFSETS),
-                                   rightPositions = rightTermsEnum.docsAndPositions(randomBits, rightPositions, DocsAndPositionsEnum.FLAG_OFFSETS));
+        assertDocsAndPositionsEnum(leftPositions = leftTermsEnum.docsAndPositions(null, leftPositions, DocsEnum.FLAG_OFFSETS),
+                                   rightPositions = rightTermsEnum.docsAndPositions(null, rightPositions, DocsEnum.FLAG_OFFSETS));
+        assertDocsAndPositionsEnum(leftPositions = leftTermsEnum.docsAndPositions(randomBits, leftPositions, DocsEnum.FLAG_OFFSETS),
+                                   rightPositions = rightTermsEnum.docsAndPositions(randomBits, rightPositions, DocsEnum.FLAG_OFFSETS));
 
         assertPositionsSkipping(leftTermsEnum.docFreq(), 
-                                leftPositions = leftTermsEnum.docsAndPositions(null, leftPositions, DocsAndPositionsEnum.FLAG_OFFSETS),
-                                rightPositions = rightTermsEnum.docsAndPositions(null, rightPositions, DocsAndPositionsEnum.FLAG_OFFSETS));
+                                leftPositions = leftTermsEnum.docsAndPositions(null, leftPositions, DocsEnum.FLAG_OFFSETS),
+                                rightPositions = rightTermsEnum.docsAndPositions(null, rightPositions, DocsEnum.FLAG_OFFSETS));
         assertPositionsSkipping(leftTermsEnum.docFreq(), 
-                                leftPositions = leftTermsEnum.docsAndPositions(randomBits, leftPositions, DocsAndPositionsEnum.FLAG_OFFSETS),
-                                rightPositions = rightTermsEnum.docsAndPositions(randomBits, rightPositions, DocsAndPositionsEnum.FLAG_OFFSETS));
+                                leftPositions = leftTermsEnum.docsAndPositions(randomBits, leftPositions, DocsEnum.FLAG_OFFSETS),
+                                rightPositions = rightTermsEnum.docsAndPositions(randomBits, rightPositions, DocsEnum.FLAG_OFFSETS));
         
         // with positions only
         assertDocsAndPositionsEnum(leftPositions = leftTermsEnum.docsAndPositions(null, leftPositions, DocsEnum.FLAG_NONE),
@@ -387,7 +386,7 @@
   /**
    * checks docs + freqs + positions + payloads, sequentially
    */
-  public void assertDocsAndPositionsEnum(DocsAndPositionsEnum leftDocs, DocsAndPositionsEnum rightDocs) throws Exception {
+  public void assertDocsAndPositionsEnum(DocsEnum leftDocs, DocsEnum rightDocs) throws Exception {
     if (leftDocs == null || rightDocs == null) {
       assertNull(leftDocs);
       assertNull(rightDocs);
@@ -460,7 +459,7 @@
   /**
    * checks advancing docs + positions
    */
-  public void assertPositionsSkipping(int docFreq, DocsAndPositionsEnum leftDocs, DocsAndPositionsEnum rightDocs) throws Exception {
+  public void assertPositionsSkipping(int docFreq, DocsEnum leftDocs, DocsEnum rightDocs) throws Exception {
     if (leftDocs == null || rightDocs == null) {
       assertNull(leftDocs);
       assertNull(rightDocs);
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestCodecs.java b/lucene/core/src/test/org/apache/lucene/index/TestCodecs.java
index 4ebb857..acf85bb 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestCodecs.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestCodecs.java
@@ -17,12 +17,6 @@
  * limitations under the License.
  */
 
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.Random;
-
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.codecs.FieldsProducer;
@@ -52,6 +46,12 @@
 import org.apache.lucene.util._TestUtil;
 import org.junit.BeforeClass;
 
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.Random;
+
 // TODO: test multiple codecs here?
 
 // TODO
@@ -423,7 +423,7 @@
         assertTrue(doc != DocIdSetIterator.NO_MORE_DOCS);
         assertEquals(docs[i], doc);
         if (doPos) {
-          this.verifyPositions(positions[i], ((DocsAndPositionsEnum) docsEnum));
+          this.verifyPositions(positions[i], docsEnum);
         }
       }
       assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsEnum.nextDoc());
@@ -431,7 +431,7 @@
 
     byte[] data = new byte[10];
 
-    private void verifyPositions(final PositionData[] positions, final DocsAndPositionsEnum posEnum) throws Throwable {
+    private void verifyPositions(final PositionData[] positions, final DocsEnum posEnum) throws Throwable {
       for(int i=0;i<positions.length;i++) {
         final int pos = posEnum.nextPosition();
         assertEquals(positions[i].pos, pos);
@@ -543,7 +543,7 @@
           term = field.terms[upto];
           if (random().nextInt(3) == 1) {
             final DocsEnum docs;
-            final DocsAndPositionsEnum postings;
+            final DocsEnum postings;
             if (!field.omitTF) {
               postings = termsEnum.docsAndPositions(null, null);
               if (postings != null) {
@@ -774,22 +774,21 @@
     @Override
     public DocsEnum docs(Bits liveDocs, DocsEnum reuse, int flags) {
       assert liveDocs == null;
-      return new DataDocsAndPositionsEnum(fieldData.terms[upto]);
+      return new DataDocsEnum(fieldData.terms[upto]);
     }
 
     @Override
-    public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) {
-      assert liveDocs == null;
-      return new DataDocsAndPositionsEnum(fieldData.terms[upto]);
+    public DocsEnum docsAndPositions(Bits liveDocs, DocsEnum reuse, int flags) {
+      return docs(liveDocs, reuse, flags);
     }
   }
 
-  private static class DataDocsAndPositionsEnum extends DocsAndPositionsEnum {
+  private static class DataDocsEnum extends DocsEnum {
     final TermData termData;
     int docUpto = -1;
     int posUpto;
 
-    public DataDocsAndPositionsEnum(TermData termData) {
+    public DataDocsEnum(TermData termData) {
       this.termData = termData;
     }
 
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestDirectoryReader.java b/lucene/core/src/test/org/apache/lucene/index/TestDirectoryReader.java
index 9aeb536..f5e11c1 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestDirectoryReader.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestDirectoryReader.java
@@ -628,8 +628,8 @@
 
       while(enum1.next() != null) {
         assertEquals("Different terms", enum1.term(), enum2.next());
-        DocsAndPositionsEnum tp1 = enum1.docsAndPositions(liveDocs, null);
-        DocsAndPositionsEnum tp2 = enum2.docsAndPositions(liveDocs, null);
+        DocsEnum tp1 = enum1.docsAndPositions(liveDocs, null);
+        DocsEnum tp2 = enum2.docsAndPositions(liveDocs, null);
 
         while(tp1.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
           assertTrue(tp2.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestDoc.java b/lucene/core/src/test/org/apache/lucene/index/TestDoc.java
index 62569ee..d1f96a6 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestDoc.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestDoc.java
@@ -260,7 +260,7 @@
           out.print("  term=" + field + ":" + tis.term());
           out.println("    DF=" + tis.docFreq());
 
-          DocsAndPositionsEnum positions = tis.docsAndPositions(reader.getLiveDocs(), null);
+          DocsEnum positions = tis.docsAndPositions(reader.getLiveDocs(), null);
 
           while (positions.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
             out.print(" doc=" + positions.docID());
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestDocsAndPositions.java b/lucene/core/src/test/org/apache/lucene/index/TestDocsAndPositions.java
index c4423bb..f1cb2ae 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestDocsAndPositions.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestDocsAndPositions.java
@@ -42,7 +42,7 @@
   }
 
   /**
-   * Simple testcase for {@link DocsAndPositionsEnum}
+   * Simple testcase for {@link DocsEnum}
    */
   public void testPositionsSimple() throws IOException {
     Directory directory = newDirectory();
@@ -65,7 +65,7 @@
       BytesRef bytes = new BytesRef("1");
       IndexReaderContext topReaderContext = reader.getContext();
       for (AtomicReaderContext atomicReaderContext : topReaderContext.leaves()) {
-        DocsAndPositionsEnum docsAndPosEnum = getDocsAndPositions(
+        DocsEnum docsAndPosEnum = getDocsAndPositions(
             atomicReaderContext.reader(), bytes, null);
         assertNotNull(docsAndPosEnum);
         if (atomicReaderContext.reader().maxDoc() == 0) {
@@ -90,7 +90,7 @@
     directory.close();
   }
 
-  public DocsAndPositionsEnum getDocsAndPositions(AtomicReader reader,
+  public DocsEnum getDocsAndPositions(AtomicReader reader,
       BytesRef bytes, Bits liveDocs) throws IOException {
     Terms terms = reader.terms(fieldName);
     if (terms != null) {
@@ -147,7 +147,7 @@
       BytesRef bytes = new BytesRef("" + term);
       IndexReaderContext topReaderContext = reader.getContext();
       for (AtomicReaderContext atomicReaderContext : topReaderContext.leaves()) {
-        DocsAndPositionsEnum docsAndPosEnum = getDocsAndPositions(
+        DocsEnum docsAndPosEnum = getDocsAndPositions(
             atomicReaderContext.reader(), bytes, null);
         assertNotNull(docsAndPosEnum);
         int initDoc = 0;
@@ -301,7 +301,7 @@
 
       IndexReaderContext topReaderContext = reader.getContext();
       for (AtomicReaderContext atomicReaderContext : topReaderContext.leaves()) {
-        DocsAndPositionsEnum docsAndPosEnum = getDocsAndPositions(
+        DocsEnum docsAndPosEnum = getDocsAndPositions(
             atomicReaderContext.reader(), bytes, null);
         assertNotNull(docsAndPosEnum);
 
@@ -359,7 +359,7 @@
     writer.addDocument(doc);
     DirectoryReader reader = writer.getReader();
     AtomicReader r = getOnlySegmentReader(reader);
-    DocsAndPositionsEnum disi = r.termPositionsEnum(new Term("foo", "bar"));
+    DocsEnum disi = r.termPositionsEnum(new Term("foo", "bar"));
     int docid = disi.docID();
     assertEquals(-1, docid);
     assertTrue(disi.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestDocumentWriter.java b/lucene/core/src/test/org/apache/lucene/index/TestDocumentWriter.java
index 1984258..e85b7cd 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestDocumentWriter.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestDocumentWriter.java
@@ -128,7 +128,7 @@
     writer.close();
     SegmentReader reader = new SegmentReader(info, newIOContext(random()));
 
-    DocsAndPositionsEnum termPositions = MultiFields.getTermPositionsEnum(reader, MultiFields.getLiveDocs(reader),
+    DocsEnum termPositions = MultiFields.getTermPositionsEnum(reader, MultiFields.getLiveDocs(reader),
                                                                           "repeated", new BytesRef("repeated"));
     assertTrue(termPositions.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
     int freq = termPositions.freq();
@@ -200,7 +200,7 @@
     writer.close();
     SegmentReader reader = new SegmentReader(info, newIOContext(random()));
 
-    DocsAndPositionsEnum termPositions = MultiFields.getTermPositionsEnum(reader, reader.getLiveDocs(), "f1", new BytesRef("a"));
+    DocsEnum termPositions = MultiFields.getTermPositionsEnum(reader, reader.getLiveDocs(), "f1", new BytesRef("a"));
     assertTrue(termPositions.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
     int freq = termPositions.freq();
     assertEquals(3, freq);
@@ -243,7 +243,7 @@
     writer.close();
     SegmentReader reader = new SegmentReader(info, newIOContext(random()));
 
-    DocsAndPositionsEnum termPositions = reader.termPositionsEnum(new Term("preanalyzed", "term1"));
+    DocsEnum termPositions = reader.termPositionsEnum(new Term("preanalyzed", "term1"));
     assertTrue(termPositions.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
     assertEquals(1, termPositions.freq());
     assertEquals(0, termPositions.nextPosition());
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestDuelingCodecs.java b/lucene/core/src/test/org/apache/lucene/index/TestDuelingCodecs.java
index 6ea6e0b..2000f2e 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestDuelingCodecs.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestDuelingCodecs.java
@@ -17,9 +17,6 @@
  * limitations under the License.
  */
 
-import java.io.IOException;
-import java.util.Random;
-
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.codecs.Codec;
@@ -32,6 +29,9 @@
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util._TestUtil;
 
+import java.io.IOException;
+import java.util.Random;
+
 /**
  * Compares one codec against another
  */
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestFilterAtomicReader.java b/lucene/core/src/test/org/apache/lucene/index/TestFilterAtomicReader.java
index 0fe238a..38c116d 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestFilterAtomicReader.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestFilterAtomicReader.java
@@ -76,14 +76,14 @@
       }
 
       @Override
-      public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) throws IOException {
-        return new TestPositions(super.docsAndPositions(liveDocs, reuse == null ? null : ((FilterDocsAndPositionsEnum) reuse).in, flags));
+      public DocsEnum docsAndPositions(Bits liveDocs, DocsEnum reuse, int flags) throws IOException {
+        return new TestPositions(super.docsAndPositions(liveDocs, reuse == null ? null : ((FilterDocsEnum) reuse).in, flags));
       }
     }
 
     /** Filter that only returns odd numbered documents. */
-    private static class TestPositions extends FilterDocsAndPositionsEnum {
-      public TestPositions(DocsAndPositionsEnum in) {
+    private static class TestPositions extends FilterDocsEnum {
+      public TestPositions(DocsEnum in) {
         super(in);
       }
 
@@ -151,7 +151,7 @@
     
     assertEquals(TermsEnum.SeekStatus.FOUND, terms.seekCeil(new BytesRef("one")));
     
-    DocsAndPositionsEnum positions = terms.docsAndPositions(MultiFields.getLiveDocs(reader), null);
+    DocsEnum positions = terms.docsAndPositions(MultiFields.getLiveDocs(reader), null);
     while (positions.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
       assertTrue((positions.docID() % 2) == 1);
     }
@@ -189,7 +189,6 @@
     checkOverrideMethods(FilterAtomicReader.FilterTerms.class);
     checkOverrideMethods(FilterAtomicReader.FilterTermsEnum.class);
     checkOverrideMethods(FilterAtomicReader.FilterDocsEnum.class);
-    checkOverrideMethods(FilterAtomicReader.FilterDocsAndPositionsEnum.class);
   }
 
 }
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java
index 4f0bc60..7798961 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java
@@ -926,7 +926,7 @@
     Terms tpv = r.getTermVectors(0).terms("field");
     TermsEnum termsEnum = tpv.iterator(null);
     assertNotNull(termsEnum.next());
-    DocsAndPositionsEnum dpEnum = termsEnum.docsAndPositions(null, null);
+    DocsEnum dpEnum = termsEnum.docsAndPositions(null, null);
     assertNotNull(dpEnum);
     assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
     assertEquals(1, dpEnum.freq());
@@ -1655,7 +1655,7 @@
 
     // Make sure position is still incremented when
     // massive term is skipped:
-    DocsAndPositionsEnum tps = MultiFields.getTermPositionsEnum(reader, null, "content", new BytesRef("another"));
+    DocsEnum tps = MultiFields.getTermPositionsEnum(reader, null, "content", new BytesRef("another"));
     assertEquals(0, tps.nextDoc());
     assertEquals(1, tps.freq());
     assertEquals(3, tps.nextPosition());
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexableField.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexableField.java
index cd303f2..8851b1a 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexableField.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexableField.java
@@ -335,7 +335,7 @@
             TermsEnum termsEnum = tfv.iterator(null);
             assertEquals(new BytesRef(""+counter), termsEnum.next());
             assertEquals(1, termsEnum.totalTermFreq());
-            DocsAndPositionsEnum dpEnum = termsEnum.docsAndPositions(null, null);
+            DocsEnum dpEnum = termsEnum.docsAndPositions(null, null);
             assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
             assertEquals(1, dpEnum.freq());
             assertEquals(1, dpEnum.nextPosition());
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestLazyProxSkipping.java b/lucene/core/src/test/org/apache/lucene/index/TestLazyProxSkipping.java
index 99b8e98..c2c7d07 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestLazyProxSkipping.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestLazyProxSkipping.java
@@ -155,7 +155,7 @@
         writer.close();
         IndexReader reader = DirectoryReader.open(directory);
 
-        DocsAndPositionsEnum tp = MultiFields.getTermPositionsEnum(reader,
+        DocsEnum tp = MultiFields.getTermPositionsEnum(reader,
                                                                    MultiFields.getLiveDocs(reader),
                                                                    this.field,
                                                                    new BytesRef("b"));
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestLongPostings.java b/lucene/core/src/test/org/apache/lucene/index/TestLongPostings.java
index 90fa89a..f2e9bf3 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestLongPostings.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestLongPostings.java
@@ -167,7 +167,7 @@
         System.out.println("\nTEST: iter=" + iter + " doS1=" + doS1);
       }
         
-      final DocsAndPositionsEnum postings = MultiFields.getTermPositionsEnum(r, null, "field", new BytesRef(term));
+      final DocsEnum postings = MultiFields.getTermPositionsEnum(r, null, "field", new BytesRef(term));
 
       int docID = -1;
       while(docID < DocIdSetIterator.NO_MORE_DOCS) {
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java b/lucene/core/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java
index 2ee0449..9e5bad1 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java
@@ -84,7 +84,7 @@
     
     for (int i = 0; i < 2; i++) {
       counter = 0;
-      DocsAndPositionsEnum tp = reader.termPositionsEnum(term);
+      DocsEnum tp = reader.termPositionsEnum(term);
       checkSkipTo(tp, 14, 185); // no skips
       checkSkipTo(tp, 17, 190); // one skip on level 0
       checkSkipTo(tp, 287, 200); // one skip on level 1, two on level 0
@@ -95,7 +95,7 @@
     }
   }
 
-  public void checkSkipTo(DocsAndPositionsEnum tp, int target, int maxCounter) throws IOException {
+  public void checkSkipTo(DocsEnum tp, int target, int maxCounter) throws IOException {
     tp.advance(target);
     if (maxCounter < counter) {
       fail("Too many bytes read: " + counter + " vs " + maxCounter);
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestPayloads.java b/lucene/core/src/test/org/apache/lucene/index/TestPayloads.java
index 77d2739..6391f01 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestPayloads.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestPayloads.java
@@ -184,7 +184,7 @@
 
         byte[] verifyPayloadData = new byte[payloadDataLength];
         offset = 0;
-        DocsAndPositionsEnum[] tps = new DocsAndPositionsEnum[numTerms];
+        DocsEnum[] tps = new DocsEnum[numTerms];
         for (int i = 0; i < numTerms; i++) {
           tps[i] = MultiFields.getTermPositionsEnum(reader,
                                                     MultiFields.getLiveDocs(reader),
@@ -215,7 +215,7 @@
         /*
          *  test lazy skipping
          */        
-        DocsAndPositionsEnum tp = MultiFields.getTermPositionsEnum(reader,
+        DocsEnum tp = MultiFields.getTermPositionsEnum(reader,
                                                                    MultiFields.getLiveDocs(reader),
                                                                    terms[0].field(),
                                                                    new BytesRef(terms[0].text()));
@@ -482,7 +482,7 @@
         IndexReader reader = DirectoryReader.open(dir);
         TermsEnum terms = MultiFields.getFields(reader).terms(field).iterator(null);
         Bits liveDocs = MultiFields.getLiveDocs(reader);
-        DocsAndPositionsEnum tp = null;
+        DocsEnum tp = null;
         while (terms.next() != null) {
           String termText = terms.term().utf8ToString();
           tp = terms.docsAndPositions(liveDocs, tp);
@@ -605,7 +605,7 @@
     writer.addDocument(doc);
     DirectoryReader reader = writer.getReader();
     AtomicReader sr = SlowCompositeReaderWrapper.wrap(reader);
-    DocsAndPositionsEnum de = sr.termPositionsEnum(new Term("field", "withPayload"));
+    DocsEnum de = sr.termPositionsEnum(new Term("field", "withPayload"));
     de.nextDoc();
     de.nextPosition();
     assertEquals(new BytesRef("test"), de.getPayload());
@@ -639,7 +639,7 @@
     writer.addDocument(doc);
     DirectoryReader reader = writer.getReader();
     SegmentReader sr = getOnlySegmentReader(reader);
-    DocsAndPositionsEnum de = sr.termPositionsEnum(new Term("field", "withPayload"));
+    DocsEnum de = sr.termPositionsEnum(new Term("field", "withPayload"));
     de.nextDoc();
     de.nextPosition();
     assertEquals(new BytesRef("test"), de.getPayload());
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestPayloadsOnVectors.java b/lucene/core/src/test/org/apache/lucene/index/TestPayloadsOnVectors.java
index 2cc02e7..91e95e8 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestPayloadsOnVectors.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestPayloadsOnVectors.java
@@ -71,7 +71,7 @@
     assert terms != null;
     TermsEnum termsEnum = terms.iterator(null);
     assertTrue(termsEnum.seekExact(new BytesRef("withPayload")));
-    DocsAndPositionsEnum de = termsEnum.docsAndPositions(null, null);
+    DocsEnum de = termsEnum.docsAndPositions(null, null);
     assertEquals(0, de.nextDoc());
     assertEquals(0, de.nextPosition());
     assertEquals(new BytesRef("test"), de.getPayload());
@@ -113,7 +113,7 @@
     assert terms != null;
     TermsEnum termsEnum = terms.iterator(null);
     assertTrue(termsEnum.seekExact(new BytesRef("withPayload")));
-    DocsAndPositionsEnum de = termsEnum.docsAndPositions(null, null);
+    DocsEnum de = termsEnum.docsAndPositions(null, null);
     assertEquals(0, de.nextDoc());
     assertEquals(3, de.nextPosition());
     assertEquals(new BytesRef("test"), de.getPayload());
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestPostingsOffsets.java b/lucene/core/src/test/org/apache/lucene/index/TestPostingsOffsets.java
index b745625..18daf3d 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestPostingsOffsets.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestPostingsOffsets.java
@@ -84,7 +84,7 @@
     IndexReader r = w.getReader();
     w.close();
 
-    DocsAndPositionsEnum dp = MultiFields.getTermPositionsEnum(r, null, "content", new BytesRef("a"));
+    DocsEnum dp = MultiFields.getTermPositionsEnum(r, null, "content", new BytesRef("a"));
     assertNotNull(dp);
     assertEquals(0, dp.nextDoc());
     assertEquals(2, dp.freq());
@@ -156,7 +156,7 @@
     String terms[] = { "one", "two", "three", "four", "five", "six", "seven", "eight", "nine", "ten", "hundred" };
     
     for (String term : terms) {
-      DocsAndPositionsEnum dp = MultiFields.getTermPositionsEnum(reader, null, "numbers", new BytesRef(term));
+      DocsEnum dp = MultiFields.getTermPositionsEnum(reader, null, "numbers", new BytesRef(term));
       int doc;
       while((doc = dp.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
         String storedNumbers = reader.document(doc).get("numbers");
@@ -184,7 +184,7 @@
     
     for (int j = 0; j < numSkippingTests; j++) {
       int num = _TestUtil.nextInt(random(), 100, Math.min(numDocs-1, 999));
-      DocsAndPositionsEnum dp = MultiFields.getTermPositionsEnum(reader, null, "numbers", new BytesRef("hundred"));
+      DocsEnum dp = MultiFields.getTermPositionsEnum(reader, null, "numbers", new BytesRef("hundred"));
       int doc = dp.advance(num);
       assertEquals(num, doc);
       int freq = dp.freq();
@@ -296,8 +296,8 @@
       //System.out.println("\nsub=" + sub);
       final TermsEnum termsEnum = sub.fields().terms("content").iterator(null);
       DocsEnum docs = null;
-      DocsAndPositionsEnum docsAndPositions = null;
-      DocsAndPositionsEnum docsAndPositionsAndOffsets = null;
+      DocsEnum docsAndPositions = null;
+      DocsEnum docsAndPositionsAndOffsets = null;
       final FieldCache.Ints docIDToID = FieldCache.DEFAULT.getInts(sub, "id", false);
       for(String term : terms) {
         //System.out.println("  term=" + term);
@@ -314,7 +314,7 @@
           }
 
           // explicitly exclude offsets here
-          docsAndPositions = termsEnum.docsAndPositions(null, docsAndPositions, DocsAndPositionsEnum.FLAG_PAYLOADS);
+          docsAndPositions = termsEnum.docsAndPositions(null, docsAndPositions, DocsEnum.FLAG_PAYLOADS);
           assertNotNull(docsAndPositions);
           //System.out.println("    doc/freq/pos");
           while((doc = docsAndPositions.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestSegmentReader.java b/lucene/core/src/test/org/apache/lucene/index/TestSegmentReader.java
index 239af28..37bb607 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestSegmentReader.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestSegmentReader.java
@@ -146,7 +146,7 @@
     assertTrue(termDocs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
 
     
-    DocsAndPositionsEnum positions = MultiFields.getTermPositionsEnum(reader,
+    DocsEnum positions = MultiFields.getTermPositionsEnum(reader,
                                                                       MultiFields.getLiveDocs(reader),
                                                                       DocHelper.TEXT_FIELD_1_KEY,
                                                                       new BytesRef("field"));
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestStressIndexing2.java b/lucene/core/src/test/org/apache/lucene/index/TestStressIndexing2.java
index 9ce0b06..95584f4 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestStressIndexing2.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestStressIndexing2.java
@@ -393,7 +393,7 @@
         Fields tv1 = r1.getTermVectors(id1);
         System.out.println("  d1=" + tv1);
         if (tv1 != null) {
-          DocsAndPositionsEnum dpEnum = null;
+          DocsEnum dpEnum = null;
           DocsEnum dEnum = null;
           for (String field : tv1) {
             System.out.println("    " + field + ":");
@@ -425,7 +425,7 @@
         Fields tv2 = r2.getTermVectors(id2);
         System.out.println("  d2=" + tv2);
         if (tv2 != null) {
-          DocsAndPositionsEnum dpEnum = null;
+          DocsEnum dpEnum = null;
           DocsEnum dEnum = null;
           for (String field : tv2) {
             System.out.println("    " + field + ":");
@@ -613,8 +613,8 @@
       assertNotNull(terms2);
       TermsEnum termsEnum2 = terms2.iterator(null);
 
-      DocsAndPositionsEnum dpEnum1 = null;
-      DocsAndPositionsEnum dpEnum2 = null;
+      DocsEnum dpEnum1 = null;
+      DocsEnum dpEnum2 = null;
       DocsEnum dEnum1 = null;
       DocsEnum dEnum2 = null;
       
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsReader.java b/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsReader.java
index de67216..dbd96f2 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsReader.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsReader.java
@@ -245,7 +245,7 @@
     assertNotNull(vector);
     assertEquals(testTerms.length, vector.size());
     TermsEnum termsEnum = vector.iterator(null);
-    DocsAndPositionsEnum dpEnum = null;
+    DocsEnum dpEnum = null;
     for (int i = 0; i < testTerms.length; i++) {
       final BytesRef text = termsEnum.next();
       assertNotNull(text);
@@ -302,7 +302,7 @@
     TermsEnum termsEnum = vector.iterator(null);
     assertNotNull(termsEnum);
     assertEquals(testTerms.length, vector.size());
-    DocsAndPositionsEnum dpEnum = null;
+    DocsEnum dpEnum = null;
     for (int i = 0; i < testTerms.length; i++) {
       final BytesRef text = termsEnum.next();
       assertNotNull(text);
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsWriter.java b/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsWriter.java
index bf2906a..d631764 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsWriter.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsWriter.java
@@ -67,7 +67,7 @@
     // Token "" occurred once
     assertEquals(1, termsEnum.totalTermFreq());
 
-    DocsAndPositionsEnum dpEnum = termsEnum.docsAndPositions(null, null);
+    DocsEnum dpEnum = termsEnum.docsAndPositions(null, null);
     assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
     dpEnum.nextPosition();
     assertEquals(8, dpEnum.startOffset());
@@ -116,7 +116,7 @@
     IndexReader r = DirectoryReader.open(dir);
     TermsEnum termsEnum = r.getTermVectors(0).terms("field").iterator(null);
     assertNotNull(termsEnum.next());
-    DocsAndPositionsEnum dpEnum = termsEnum.docsAndPositions(null, null);
+    DocsEnum dpEnum = termsEnum.docsAndPositions(null, null);
     assertEquals(2, termsEnum.totalTermFreq());
 
     assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
@@ -151,7 +151,7 @@
     IndexReader r = DirectoryReader.open(dir);
     TermsEnum termsEnum = r.getTermVectors(0).terms("field").iterator(null);
     assertNotNull(termsEnum.next());
-    DocsAndPositionsEnum dpEnum = termsEnum.docsAndPositions(null, null);
+    DocsEnum dpEnum = termsEnum.docsAndPositions(null, null);
     assertEquals(2, termsEnum.totalTermFreq());
 
     assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
@@ -191,7 +191,7 @@
     IndexReader r = DirectoryReader.open(dir);
     TermsEnum termsEnum = r.getTermVectors(0).terms("field").iterator(null);
     assertNotNull(termsEnum.next());
-    DocsAndPositionsEnum dpEnum = termsEnum.docsAndPositions(null, null);
+    DocsEnum dpEnum = termsEnum.docsAndPositions(null, null);
     assertEquals(2, termsEnum.totalTermFreq());
 
     assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
@@ -227,7 +227,7 @@
     IndexReader r = DirectoryReader.open(dir);
     TermsEnum termsEnum = r.getTermVectors(0).terms("field").iterator(null);
     assertNotNull(termsEnum.next());
-    DocsAndPositionsEnum dpEnum = termsEnum.docsAndPositions(null, null);
+    DocsEnum dpEnum = termsEnum.docsAndPositions(null, null);
     assertEquals(2, termsEnum.totalTermFreq());
 
     assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
@@ -264,7 +264,7 @@
     IndexReader r = DirectoryReader.open(dir);
     TermsEnum termsEnum = r.getTermVectors(0).terms("field").iterator(null);
     assertNotNull(termsEnum.next());
-    DocsAndPositionsEnum dpEnum = termsEnum.docsAndPositions(null, null);
+    DocsEnum dpEnum = termsEnum.docsAndPositions(null, null);
 
     assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
     dpEnum.nextPosition();
@@ -309,7 +309,7 @@
     IndexReader r = DirectoryReader.open(dir);
     TermsEnum termsEnum = r.getTermVectors(0).terms("field").iterator(null);
     assertNotNull(termsEnum.next());
-    DocsAndPositionsEnum dpEnum = termsEnum.docsAndPositions(null, null);
+    DocsEnum dpEnum = termsEnum.docsAndPositions(null, null);
 
     assertEquals(1, (int) termsEnum.totalTermFreq());
     assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
@@ -352,7 +352,7 @@
     IndexReader r = DirectoryReader.open(dir);
     TermsEnum termsEnum = r.getTermVectors(0).terms("field").iterator(null);
     assertNotNull(termsEnum.next());
-    DocsAndPositionsEnum dpEnum = termsEnum.docsAndPositions(null, null);
+    DocsEnum dpEnum = termsEnum.docsAndPositions(null, null);
 
     assertEquals(1, (int) termsEnum.totalTermFreq());
     assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
diff --git a/lucene/core/src/test/org/apache/lucene/search/JustCompileSearch.java b/lucene/core/src/test/org/apache/lucene/search/JustCompileSearch.java
index b4bf0a4..482f610 100644
--- a/lucene/core/src/test/org/apache/lucene/search/JustCompileSearch.java
+++ b/lucene/core/src/test/org/apache/lucene/search/JustCompileSearch.java
@@ -17,17 +17,18 @@
  * limitations under the License.
  */
 
-import java.io.IOException;
-
 import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.FieldInvertState;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.search.intervals.IntervalIterator;
 import org.apache.lucene.search.similarities.Similarity;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.index.FieldInvertState;
 import org.apache.lucene.util.PriorityQueue;
 
+import java.io.IOException;
+
 /**
  * Holds all implementations of classes in the o.a.l.search package as a
  * back-compatibility test. It does not run any tests per-se, however if 
@@ -255,7 +256,12 @@
     public int advance(int target) {
       throw new UnsupportedOperationException(UNSUPPORTED_MSG);
     }
-    
+
+    @Override
+    public IntervalIterator intervals(boolean collectIntervals) throws IOException {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+
     @Override
     public long cost() {
       throw new UnsupportedOperationException(UNSUPPORTED_MSG);
@@ -347,7 +353,7 @@
 
     @Override
     public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder,
-        boolean topScorer, Bits acceptDocs) {
+        boolean topScorer, PostingFeatures flags, Bits acceptDocs) {
       throw new UnsupportedOperationException(UNSUPPORTED_MSG);
     }
     
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestBooleanOr.java b/lucene/core/src/test/org/apache/lucene/search/TestBooleanOr.java
index a25121b..171f623 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestBooleanOr.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestBooleanOr.java
@@ -26,6 +26,7 @@
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.SerialMergeScheduler;
 import org.apache.lucene.index.Term;
+import org.apache.lucene.search.Weight.PostingFeatures;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.FixedBitSet;
 import org.apache.lucene.util.LuceneTestCase;
@@ -184,7 +185,7 @@
     Weight w = s.createNormalizedWeight(bq);
 
     assertEquals(1, s.getIndexReader().leaves().size());
-    Scorer scorer = w.scorer(s.getIndexReader().leaves().get(0), false, true, null);
+    Scorer scorer = w.scorer(s.getIndexReader().getContext().leaves().get(0), false, true, PostingFeatures.DOCS_AND_FREQS, null);
 
     final FixedBitSet hits = new FixedBitSet(docCount);
     final AtomicInteger end = new AtomicInteger();
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestBooleanQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestBooleanQuery.java
index 342d1cb..67b750d 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestBooleanQuery.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestBooleanQuery.java
@@ -17,6 +17,7 @@
  * limitations under the License.
  */
 
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
@@ -36,6 +37,11 @@
 import org.apache.lucene.index.MultiReader;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
+import org.apache.lucene.search.BooleanClause.Occur;
+import org.apache.lucene.search.Weight.PostingFeatures;
+import org.apache.lucene.search.intervals.IntervalFilterQuery;
+import org.apache.lucene.search.intervals.RangeIntervalFilter;
+import org.apache.lucene.search.intervals.WithinIntervalFilter;
 import org.apache.lucene.search.similarities.DefaultSimilarity;
 import org.apache.lucene.search.spans.SpanQuery;
 import org.apache.lucene.search.spans.SpanTermQuery;
@@ -234,7 +240,7 @@
       Weight weight = s.createNormalizedWeight(q);
 
       Scorer scorer = weight.scorer(s.leafContexts.get(0),
-                                          true, false, null);
+                                          true, false, PostingFeatures.DOCS_AND_FREQS, null);
 
       // First pass: just use .nextDoc() to gather all hits
       final List<ScoreDoc> hits = new ArrayList<ScoreDoc>();
@@ -252,7 +258,7 @@
 
         weight = s.createNormalizedWeight(q);
         scorer = weight.scorer(s.leafContexts.get(0),
-                               true, false, null);
+                               true, false, PostingFeatures.DOCS_AND_FREQS, null);
 
         if (VERBOSE) {
           System.out.println("  iter2=" + iter2);
@@ -290,6 +296,57 @@
     r.close();
     d.close();
   }
+  
+ public void testConjunctionPositions() throws IOException {
+     Directory directory = newDirectory();
+     RandomIndexWriter writer = new RandomIndexWriter(random(), directory,
+         newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())));
+     {
+       Document doc = new Document();
+       doc.add(newField(
+           "field",
+           "Pease porridge hot! Pease porridge cold! Pease porridge in the pot nine days old! Some like it hot, some"
+               + " like it cold, Some like it in the pot nine days old! Pease porridge hot! Pease porridge cold!",
+           TextField.TYPE_STORED));
+       writer.addDocument(doc);
+     }
+     
+     {
+       Document doc = new Document();
+       doc.add(newField(
+           "field",
+           "Pease porridge cold! Pease porridge hot! Pease porridge in the pot nine days old! Some like it cold, some"
+               + " like it hot, Some like it in the pot nine days old! Pease porridge cold! Pease porridge hot!",
+               TextField.TYPE_STORED));
+       writer.addDocument(doc);
+     }
+     
+     IndexReader reader = writer.getReader();
+     IndexSearcher searcher = new IndexSearcher(reader);
+     writer.close();
+     BooleanQuery query = new BooleanQuery();
+     query.add(new BooleanClause(new TermQuery(new Term("field", "porridge")), Occur.MUST));
+     query.add(new BooleanClause(new TermQuery(new Term("field", "pease")), Occur.MUST));
+     query.add(new BooleanClause(new TermQuery(new Term("field", "hot!")), Occur.MUST));
+     
+     {
+       IntervalFilterQuery filter = new IntervalFilterQuery(query, new RangeIntervalFilter(0,3));
+       TopDocs search = searcher.search(filter, 10);
+       ScoreDoc[] scoreDocs = search.scoreDocs;
+       assertEquals(1, search.totalHits);
+       assertEquals(0, scoreDocs[0].doc);
+     }
+     {
+       IntervalFilterQuery filter = new IntervalFilterQuery(query, new WithinIntervalFilter(3));
+       TopDocs search = searcher.search(filter, 10);
+       ScoreDoc[] scoreDocs = search.scoreDocs;
+       assertEquals(2, search.totalHits);
+       assertEquals(0, scoreDocs[0].doc);
+       assertEquals(1, scoreDocs[1].doc);
+     }
+     reader.close();
+     directory.close();
+   }
 
   // LUCENE-4477 / LUCENE-4401:
   public void testBooleanSpanQuery() throws Exception {
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestBooleanScorer.java b/lucene/core/src/test/org/apache/lucene/search/TestBooleanScorer.java
index ffeac52..b253e6f 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestBooleanScorer.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestBooleanScorer.java
@@ -17,6 +17,7 @@
  * limitations under the License.
  */
 
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
@@ -29,6 +30,7 @@
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.BooleanQuery.BooleanWeight;
+import org.apache.lucene.search.intervals.IntervalIterator;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.LuceneTestCase;
 
@@ -91,6 +93,10 @@
       @Override public int advance(int target) {
         return doc = target <= 3000 ? 3000 : NO_MORE_DOCS;
       }
+      @Override
+      public IntervalIterator intervals(boolean collectIntervals) throws IOException {
+        return null;
+      }
       
       @Override
       public long cost() {
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestCachingCollector.java b/lucene/core/src/test/org/apache/lucene/search/TestCachingCollector.java
index f493d42..6288624 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestCachingCollector.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestCachingCollector.java
@@ -18,6 +18,7 @@
  */
 
 import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.intervals.IntervalIterator;
 import org.apache.lucene.util.LuceneTestCase;
 
 import java.io.IOException;
@@ -46,6 +47,11 @@
 
     @Override
     public int advance(int target) throws IOException { return 0; }
+
+    @Override
+    public IntervalIterator intervals(boolean collectIntervals) throws IOException {
+      return IntervalIterator.NO_MORE_INTERVALS;
+    }
     
     @Override
     public long cost() {
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java
index dcaf023..3f0d6c6 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java
@@ -34,6 +34,7 @@
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.StoredDocument;
 import org.apache.lucene.index.Term;
+import org.apache.lucene.search.Weight.PostingFeatures;
 import org.apache.lucene.search.similarities.DefaultSimilarity;
 import org.apache.lucene.search.similarities.Similarity;
 import org.apache.lucene.search.spans.SpanQuery;
@@ -180,7 +181,7 @@
     assertTrue(s.getTopReaderContext() instanceof AtomicReaderContext);
     final Weight dw = s.createNormalizedWeight(dq);
     AtomicReaderContext context = (AtomicReaderContext)s.getTopReaderContext();
-    final Scorer ds = dw.scorer(context, true, false, context.reader().getLiveDocs());
+    final Scorer ds = dw.scorer(context, true, false, PostingFeatures.DOCS_AND_FREQS, context.reader().getLiveDocs());
     final boolean skipOk = ds.advance(3) != DocIdSetIterator.NO_MORE_DOCS;
     if (skipOk) {
       fail("firsttime skipTo found a match? ... "
@@ -196,7 +197,7 @@
     QueryUtils.check(random(), dq, s);
     final Weight dw = s.createNormalizedWeight(dq);
     AtomicReaderContext context = (AtomicReaderContext)s.getTopReaderContext();
-    final Scorer ds = dw.scorer(context, true, false, context.reader().getLiveDocs());
+    final Scorer ds = dw.scorer(context, true, false, PostingFeatures.DOCS_AND_FREQS, context.reader().getLiveDocs());
     assertTrue("firsttime skipTo found no match",
         ds.advance(3) != DocIdSetIterator.NO_MORE_DOCS);
     assertEquals("found wrong docid", "d4", r.document(ds.docID()).get("id"));
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestMinShouldMatch2.java b/lucene/core/src/test/org/apache/lucene/search/TestMinShouldMatch2.java
index c8e963c..7b7f459 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestMinShouldMatch2.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestMinShouldMatch2.java
@@ -17,14 +17,6 @@
  * limitations under the License.
  */
 
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.SortedSetDocValuesField;
@@ -42,11 +34,19 @@
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util._TestUtil;
 import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
+import org.apache.lucene.util._TestUtil;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
 /** tests BooleanScorer2's minShouldMatch */
 @SuppressCodecs({"Lucene40", "Lucene41"})
 public class TestMinShouldMatch2 extends LuceneTestCase {
@@ -126,7 +126,7 @@
     if (slow) {
       return new SlowMinShouldMatchScorer(weight, reader, searcher);
     } else {
-      return weight.scorer(reader.getContext(), true, false, null);
+      return weight.scorer(reader.getContext(), true, false, Weight.PostingFeatures.DOCS_AND_FREQS, null);
     }
   }
   
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestPositionIncrement.java b/lucene/core/src/test/org/apache/lucene/search/TestPositionIncrement.java
index 95bbcfb..04f3317 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestPositionIncrement.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestPositionIncrement.java
@@ -31,7 +31,7 @@
 import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.AtomicReader;
 import org.apache.lucene.index.MultiFields;
-import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.index.DocsEnum;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.SlowCompositeReaderWrapper;
@@ -100,7 +100,7 @@
 
     IndexSearcher searcher = newSearcher(reader);
     
-    DocsAndPositionsEnum pos = MultiFields.getTermPositionsEnum(searcher.getIndexReader(),
+    DocsEnum pos = MultiFields.getTermPositionsEnum(searcher.getIndexReader(),
                                                                 MultiFields.getLiveDocs(searcher.getIndexReader()),
                                                                 "field",
                                                                 new BytesRef("1"));
@@ -212,7 +212,7 @@
     final IndexReader readerFromWriter = writer.getReader();
     AtomicReader r = SlowCompositeReaderWrapper.wrap(readerFromWriter);
 
-    DocsAndPositionsEnum tp = r.termPositionsEnum(new Term("content", "a"));
+    DocsEnum tp = r.termPositionsEnum(new Term("content", "a"));
     
     int count = 0;
     assertTrue(tp.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestPositiveScoresOnlyCollector.java b/lucene/core/src/test/org/apache/lucene/search/TestPositiveScoresOnlyCollector.java
index 1f52b21..362ffb6 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestPositiveScoresOnlyCollector.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestPositiveScoresOnlyCollector.java
@@ -20,9 +20,12 @@
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
+import org.apache.lucene.search.intervals.IntervalIterator;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.LuceneTestCase;
 
+import java.io.IOException;
+
 public class TestPositiveScoresOnlyCollector extends LuceneTestCase {
 
   private static final class SimpleScorer extends Scorer {
@@ -50,7 +53,12 @@
       idx = target;
       return idx < scores.length ? idx : NO_MORE_DOCS;
     }
-    
+
+    @Override
+    public IntervalIterator intervals(boolean collectIntervals) throws IOException {
+      throw new UnsupportedOperationException();
+    }
+
     @Override
     public long cost() {
       return scores.length;
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestScoreCachingWrappingScorer.java b/lucene/core/src/test/org/apache/lucene/search/TestScoreCachingWrappingScorer.java
index 9c6f486..58ddac2 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestScoreCachingWrappingScorer.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestScoreCachingWrappingScorer.java
@@ -17,15 +17,16 @@
  * limitations under the License.
  */
 
-import java.io.IOException;
-
 import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
+import org.apache.lucene.search.intervals.IntervalIterator;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.LuceneTestCase;
 
+import java.io.IOException;
+
 public class TestScoreCachingWrappingScorer extends LuceneTestCase {
 
   private static final class SimpleScorer extends Scorer {
@@ -58,7 +59,11 @@
       doc = target;
       return doc < scores.length ? doc : NO_MORE_DOCS;
     }
-    
+
+    @Override
+    public IntervalIterator intervals(boolean collectIntervals) throws IOException {
+      throw new UnsupportedOperationException(); 
+    }
     @Override
     public long cost() {
       return scores.length;
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestTermScorer.java b/lucene/core/src/test/org/apache/lucene/search/TestTermScorer.java
index 0c4e229..b6e70cb 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestTermScorer.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestTermScorer.java
@@ -29,6 +29,7 @@
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.SlowCompositeReaderWrapper;
 import org.apache.lucene.index.Term;
+import org.apache.lucene.search.Weight.PostingFeatures;
 import org.apache.lucene.search.similarities.DefaultSimilarity;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.LuceneTestCase;
@@ -78,7 +79,7 @@
     Weight weight = indexSearcher.createNormalizedWeight(termQuery);
     assertTrue(indexSearcher.getTopReaderContext() instanceof AtomicReaderContext);
     AtomicReaderContext context = (AtomicReaderContext)indexSearcher.getTopReaderContext();
-    Scorer ts = weight.scorer(context, true, true, context.reader().getLiveDocs());
+    Scorer ts = weight.scorer(context, true, true, PostingFeatures.DOCS_AND_FREQS, context.reader().getLiveDocs());
     // we have 2 documents with the term all in them, one document for all the
     // other values
     final List<TestHit> docs = new ArrayList<TestHit>();
@@ -140,7 +141,7 @@
     Weight weight = indexSearcher.createNormalizedWeight(termQuery);
     assertTrue(indexSearcher.getTopReaderContext() instanceof AtomicReaderContext);
     AtomicReaderContext context = (AtomicReaderContext) indexSearcher.getTopReaderContext();
-    Scorer ts = weight.scorer(context, true, false, context.reader().getLiveDocs());
+    Scorer ts = weight.scorer(context, true, false, PostingFeatures.DOCS_AND_FREQS, context.reader().getLiveDocs());
     assertTrue("next did not return a doc",
         ts.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
     assertTrue("score is not correct", ts.score() == 1.6931472f);
@@ -159,7 +160,7 @@
     Weight weight = indexSearcher.createNormalizedWeight(termQuery);
     assertTrue(indexSearcher.getTopReaderContext() instanceof AtomicReaderContext);
     AtomicReaderContext context = (AtomicReaderContext) indexSearcher.getTopReaderContext();
-    Scorer ts = weight.scorer(context, true, false, context.reader().getLiveDocs());
+    Scorer ts = weight.scorer(context, true, false, PostingFeatures.DOCS_AND_FREQS, context.reader().getLiveDocs());
     assertTrue("Didn't skip", ts.advance(3) != DocIdSetIterator.NO_MORE_DOCS);
     // The next doc should be doc 5
     assertTrue("doc should be number 5", ts.docID() == 5);
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestTermVectors.java b/lucene/core/src/test/org/apache/lucene/search/TestTermVectors.java
index 23d8a54..2524ae2 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestTermVectors.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestTermVectors.java
@@ -17,8 +17,6 @@
  * limitations under the License.
  */
 
-import java.io.IOException;
-
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.document.Document;
@@ -26,7 +24,7 @@
 import org.apache.lucene.document.FieldType;
 import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.index.DocsEnum;
 import org.apache.lucene.index.Fields;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
@@ -42,6 +40,8 @@
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 
+import java.io.IOException;
+
 public class TestTermVectors extends LuceneTestCase {
   private static IndexReader reader;
   private static Directory directory;
@@ -136,7 +136,7 @@
     assertNotNull(termsEnum.next());
     assertEquals("one", termsEnum.term().utf8ToString());
     assertEquals(5, termsEnum.totalTermFreq());
-    DocsAndPositionsEnum dpEnum = termsEnum.docsAndPositions(null, null);
+    DocsEnum dpEnum = termsEnum.docsAndPositions(null, null);
     assertNotNull(dpEnum);
     assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
     assertEquals(5, dpEnum.freq());
diff --git a/lucene/core/src/test/org/apache/lucene/search/posfilter/IntervalTestBase.java b/lucene/core/src/test/org/apache/lucene/search/posfilter/IntervalTestBase.java
new file mode 100644
index 0000000..f554577
--- /dev/null
+++ b/lucene/core/src/test/org/apache/lucene/search/posfilter/IntervalTestBase.java
@@ -0,0 +1,218 @@
+package org.apache.lucene.search.posfilter;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.BooleanClause;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.CheckHits;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.PositionsCollector;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+
+import java.io.IOException;
+import java.util.Iterator;
+
+public abstract class IntervalTestBase extends LuceneTestCase {
+
+  protected Directory directory;
+  protected IndexReader reader;
+  protected IndexSearcher searcher;
+
+  /**
+   * Run a query against a searcher, and check that the collected intervals from the query match
+   * the expected results.
+   * @param q the query
+   * @param searcher the searcher
+   * @param expectedResults an int[][] detailing the expected results, in the format
+   *                        { { docid1, startoffset1, endoffset1, startoffset2, endoffset2, ... },
+   *                          { docid2, startoffset1, endoffset1, startoffset2, endoffset2, ...}, ... }
+   * @throws IOException
+   */
+  public static void checkIntervalOffsets(Query q, IndexSearcher searcher, int[][] expectedResults) throws IOException {
+
+    //MatchCollector m = new MatchCollector();
+    PositionsCollector c = new PositionsCollector(expectedResults.length + 1);
+    searcher.search(q, c);
+
+    PositionsCollector.DocPositions[] matches = c.getPositions();
+    Assert.assertEquals("Incorrect number of hits", expectedResults.length, c.getNumDocs());
+    for (int i = 0; i < expectedResults.length; i++) {
+      int expectedDocMatches[] = expectedResults[i];
+      int docid = expectedDocMatches[0];
+      Iterator<Interval> matchIt = matches[i].positions.iterator();
+      for (int j = 1; j < expectedDocMatches.length; j += 2) {
+        String expectation = "Expected match at docid " + docid + ", offset " + expectedDocMatches[j];
+        Assert.assertTrue(expectation, matchIt.hasNext());
+        Interval match = matchIt.next();
+        System.err.println(match);
+        Assert.assertEquals("Incorrect docid", matches[i].doc, docid);
+        Assert.assertEquals("Incorrect match offset", expectedDocMatches[j], match.offsetBegin);
+        Assert.assertEquals("Incorrect match end offset", expectedDocMatches[j + 1], match.offsetEnd);
+      }
+      Assert.assertFalse("Unexpected matches!", matchIt.hasNext());
+    }
+
+  }
+
+  /**
+   * Run a query against a searcher, and check that the collected intervals from the query match
+   * the expected results.
+   * @param q the query
+   * @param searcher the searcher
+   * @param expectedResults an int[][] detailing the expected results, in the format
+   *                        { { docid1, startpos1, endpos1, startpos2, endpos2, ... },
+   *                          { docid2, startpos1, endpos1, startpos2, endpos2, ...}, ... }
+   * @throws IOException
+   */
+  public static void checkIntervals(Query q, IndexSearcher searcher, int[][] expectedResults) throws IOException {
+
+    PositionsCollector c = new PositionsCollector(expectedResults.length + 1);
+    searcher.search(q, c);
+
+    PositionsCollector.DocPositions[] matches = c.getPositions();
+    Assert.assertEquals("Incorrect number of hits", expectedResults.length, c.getNumDocs());
+    for (int i = 0; i < expectedResults.length; i++) {
+      int expectedDocMatches[] = expectedResults[i];
+      int docid = expectedDocMatches[0];
+      Iterator<Interval> matchIt = matches[i].positions.iterator();
+      for (int j = 1; j < expectedDocMatches.length; j += 2) {
+        String expectation = "Expected match at docid " + docid + ", position " + expectedDocMatches[j];
+        Assert.assertTrue(expectation, matchIt.hasNext());
+        Interval match = matchIt.next();
+        System.err.println(docid + ":" + match);
+        Assert.assertEquals("Incorrect docid", matches[i].doc, docid);
+        Assert.assertEquals("Incorrect match start position", expectedDocMatches[j], match.begin);
+        Assert.assertEquals("Incorrect match end position", expectedDocMatches[j + 1], match.end);
+      }
+      Assert.assertFalse("Unexpected matches!", matchIt.hasNext());
+    }
+
+  }
+
+  public static void checkScores(Query q, IndexSearcher searcher, int... expectedDocs) throws IOException {
+    TopDocs hits = searcher.search(q, 1000);
+    Assert.assertEquals("Wrong number of hits", expectedDocs.length, hits.totalHits);
+    for (int i = 0; i < expectedDocs.length; i++) {
+      Assert.assertEquals("Docs not scored in order", expectedDocs[i], hits.scoreDocs[i].doc);
+    }
+    CheckHits.checkExplanations(q, "field", searcher);
+  }
+
+  protected abstract void addDocs(RandomIndexWriter writer) throws IOException;
+
+  @Before
+  public void setUp() throws Exception {
+    super.setUp();
+    directory = newDirectory();
+    IndexWriterConfig config = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
+    //config.setCodec(Codec.forName("SimpleText"));
+    //config.setCodec(Codec.forName("Asserting"));
+    RandomIndexWriter writer = new RandomIndexWriter(random(), directory, config);
+    addDocs(writer);
+    reader = writer.getReader();
+    writer.close();
+    searcher = new IndexSearcher(reader);
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    reader.close();
+    directory.close();
+    super.tearDown();
+  }
+
+  public TermQuery makeTermQuery(String text) {
+    return new TermQuery(new Term(TestBasicIntervals.field, text));
+  }
+
+  protected Query makeOrQuery(Query... queries) {
+    BooleanQuery q = new BooleanQuery();
+    for (Query subquery : queries) {
+      q.add(subquery, BooleanClause.Occur.SHOULD);
+    }
+    return q;
+  }
+
+  protected Query makeAndQuery(Query... queries) {
+    BooleanQuery q = new BooleanQuery();
+    for (Query subquery : queries) {
+      q.add(subquery, BooleanClause.Occur.MUST);
+    }
+    return q;
+  }
+
+  protected Query makeBooleanQuery(BooleanClause... clauses) {
+    BooleanQuery q = new BooleanQuery();
+    for (BooleanClause clause : clauses) {
+      q.add(clause);
+    }
+    return q;
+  }
+
+  protected BooleanClause makeBooleanClause(String text, BooleanClause.Occur occur) {
+    return new BooleanClause(makeTermQuery(text), occur);
+  }
+
+  public static class Match implements Comparable<Match> {
+
+    public final int docid;
+    public final int start;
+    public final int end;
+    public final int startOffset;
+    public final int endOffset;
+    public final boolean composite;
+
+    public Match(int docid, Interval interval, boolean composite) {
+      this.docid = docid;
+      this.start = interval.begin;
+      this.end = interval.end;
+      this.startOffset = interval.offsetBegin;
+      this.endOffset = interval.offsetEnd;
+      this.composite = composite;
+    }
+
+    @Override
+    public int compareTo(Match o) {
+      if (this.docid != o.docid)
+        return this.docid - o.docid;
+      if (this.start != o.start)
+        return this.start - o.start;
+      return o.end - this.end;
+    }
+
+    @Override
+    public String toString() {
+      return String.format("%d:%d[%d]->%d[%d]%s",
+                            docid, start, startOffset, end, endOffset, composite ? "C" : "");
+    }
+  }
+
+
+}
diff --git a/lucene/core/src/test/org/apache/lucene/search/posfilter/TestBasicIntervals.java b/lucene/core/src/test/org/apache/lucene/search/posfilter/TestBasicIntervals.java
new file mode 100644
index 0000000..793f6d9
--- /dev/null
+++ b/lucene/core/src/test/org/apache/lucene/search/posfilter/TestBasicIntervals.java
@@ -0,0 +1,191 @@
+package org.apache.lucene.search.posfilter;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.TextField;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.search.BooleanClause;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.Query;
+
+import java.io.IOException;
+
+public class TestBasicIntervals extends IntervalTestBase {
+
+  public static final String field = "field";
+
+  @Override
+  protected void addDocs(RandomIndexWriter writer) throws IOException {
+    for (String content : docFields) {
+      Document doc = new Document();
+      doc.add(newField(field, content, TextField.TYPE_NOT_STORED));
+      writer.addDocument(doc);
+    }
+  }
+  
+  private String[] docFields = {
+      "w1 w2 w3 w4 w5", //0
+      "w1 w3 w2 w3",//1
+      "w1 xx w2 yy w3",//2
+      "w1 w3 xx w2 yy w3",//3
+      "u2 u2 u1", //4
+      "u2 xx u2 u1",//5
+      "u2 u2 xx u1", //6
+      "u2 xx u2 yy u1", //7
+      "u2 xx u1 u2",//8
+      "u1 u2 xx u2",//9
+      "u2 u1 xx u2",//10
+      "t1 t2 t1 t3 t2 t3",//11
+      "v1 v2 v3",//12
+      "v1 v3 v2 v3 v4",//13
+      "v4 v2 v2 v4",//14
+      "v3 v4 v3"};//15
+
+  public void testSimpleConjunction() throws IOException {
+    Query q = makeAndQuery(makeTermQuery("v2"), makeTermQuery("v4"));
+    checkIntervals(q, searcher, new int[][]{
+        { 13, 2, 2, 4, 4 },
+        { 14, 0, 0, 1, 1, 2, 2, 3, 3 }
+    });
+  }
+
+  public void testExclusion() throws IOException {
+    Query q = makeBooleanQuery(makeBooleanClause("v2", BooleanClause.Occur.MUST),
+                               makeBooleanClause("v3", BooleanClause.Occur.MUST_NOT));
+    checkIntervals(q, searcher, new int[][]{
+        { 14, 1, 1, 2, 2 }
+    });
+  }
+
+  public void testOptExclusion() throws IOException {
+    Query q = makeBooleanQuery(makeBooleanClause("w2", BooleanClause.Occur.SHOULD),
+                               makeBooleanClause("w3", BooleanClause.Occur.SHOULD),
+                               makeBooleanClause("xx", BooleanClause.Occur.MUST_NOT));
+    checkIntervals(q, searcher, new int[][]{
+        { 0, 1, 1, 2, 2 },
+        { 1, 1, 1, 2, 2, 3, 3 }
+    });
+  }
+
+  public void testNestedConjunctions() throws IOException {
+    Query q = makeAndQuery(makeTermQuery("v2"), makeOrQuery(makeTermQuery("v3"), makeTermQuery("v4")));
+    checkIntervals(q, searcher, new int[][]{
+        { 12, 1, 1, 2, 2 },
+        { 13, 1, 1, 2, 2, 3, 3, 4, 4 },
+        { 14, 0, 0, 1, 1, 2, 2, 3, 3 }
+    });
+  }
+
+  public void testSingleRequiredManyOptional() throws IOException {
+    Query q = makeBooleanQuery(makeBooleanClause("v2", BooleanClause.Occur.MUST),
+                               makeBooleanClause("v3", BooleanClause.Occur.SHOULD),
+                               makeBooleanClause("v4", BooleanClause.Occur.SHOULD));
+    checkIntervals(q, searcher, new int[][]{
+        { 12, 1, 1, 2, 2 },
+        { 13, 1, 1, 2, 2, 3, 3, 4, 4 },
+        { 14, 0, 0, 1, 1, 2, 2, 3, 3 }
+    });
+  }
+
+  public void testSimpleTerm() throws IOException {
+    Query q = makeTermQuery("u2");
+    checkIntervals(q, searcher, new int[][]{
+        { 4, 0, 0, 1, 1 },
+        { 5, 0, 0, 2, 2 },
+        { 6, 0, 0, 1, 1 },
+        { 7, 0, 0, 2, 2 },
+        { 8, 0, 0, 3, 3 },
+        { 9, 1, 1, 3, 3 },
+        { 10, 0, 0, 3, 3 }
+    });
+  }
+
+  public void testBasicDisjunction() throws IOException {
+    Query q = makeOrQuery(makeTermQuery("v3"), makeTermQuery("v2"));
+    checkIntervals(q, searcher, new int[][]{
+        { 12, 1, 1, 2, 2 },
+        { 13, 1, 1, 2, 2, 3, 3 },
+        { 14, 1, 1, 2, 2 },
+        { 15, 0, 0, 2, 2 }
+    });
+  }
+
+
+  
+  public void testOrSingle() throws Exception {
+    Query q = makeOrQuery(makeTermQuery("w5"));
+    checkIntervals(q, searcher, new int[][]{
+        { 0, 4, 4 }
+    });
+  }
+
+  public void testOrPartialMatch() throws Exception {
+    Query q = makeOrQuery(makeTermQuery("w5"), makeTermQuery("xx"));
+    checkIntervals(q, searcher, new int[][]{
+        { 0, 4, 4 },
+        { 2, 1, 1 },
+        { 3, 2, 2 },
+        { 5, 1, 1 },
+        { 6, 2, 2 },
+        { 7, 1, 1 },
+        { 8, 1, 1 },
+        { 9, 2, 2 },
+        { 10, 2, 2 },
+    });
+  }
+
+  public void testOrDisjunctionMatch() throws Exception {
+    Query q = makeOrQuery(makeTermQuery("w5"), makeTermQuery("yy"));
+    checkIntervals(q, searcher, new int[][]{
+        { 0, 4, 4 },
+        { 2, 3, 3 },
+        { 3, 4, 4 },
+        { 7, 3, 3 }
+    });
+  }
+
+  // "t1 t2 t1 t3 t2 t3"
+  //  -----------
+  //     --------
+  //        --------
+  public void testOrSingleDocument() throws Exception {
+    Query q = makeOrQuery(makeTermQuery("t1"), makeTermQuery("t2"), makeTermQuery("t3"));
+    checkIntervals(q, searcher, new int[][]{
+        { 11, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5 }
+    });
+  }
+
+  // andnot(andnot(w1, or(w2, flurble)), or(foo, bar))
+  public void testConjunctionExclusionQuery() throws IOException {
+    BooleanQuery andnotinner = new BooleanQuery();
+    andnotinner.add(makeTermQuery("w1"), BooleanClause.Occur.MUST);
+    BooleanQuery andnotinneror = new BooleanQuery();
+    andnotinneror.add(makeTermQuery("w2"), BooleanClause.Occur.SHOULD);
+    andnotinneror.add(makeTermQuery("flurble"), BooleanClause.Occur.SHOULD);
+    andnotinner.add(andnotinneror, BooleanClause.Occur.MUST_NOT);
+    BooleanQuery outer = new BooleanQuery();
+    outer.add(andnotinner, BooleanClause.Occur.MUST);
+    BooleanQuery andnotouteror = new BooleanQuery();
+    andnotouteror.add(makeTermQuery("foo"), BooleanClause.Occur.SHOULD);
+    andnotouteror.add(makeTermQuery("bar"), BooleanClause.Occur.SHOULD);
+    outer.add(andnotouteror, BooleanClause.Occur.MUST_NOT);
+    checkIntervals(outer, searcher, new int[][]{});
+  }
+  
+}
diff --git a/lucene/core/src/test/org/apache/lucene/search/posfilter/TestBrouwerianQuery.java b/lucene/core/src/test/org/apache/lucene/search/posfilter/TestBrouwerianQuery.java
new file mode 100644
index 0000000..5acc039
--- /dev/null
+++ b/lucene/core/src/test/org/apache/lucene/search/posfilter/TestBrouwerianQuery.java
@@ -0,0 +1,106 @@
+package org.apache.lucene.search.posfilter;
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.TextField;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.posfilter.NonOverlappingQuery;
+import org.apache.lucene.search.posfilter.OrderedNearQuery;
+import org.apache.lucene.search.posfilter.UnorderedNearQuery;
+
+import java.io.IOException;
+
+public class TestBrouwerianQuery extends IntervalTestBase {
+  
+  protected void addDocs(RandomIndexWriter writer) throws IOException {
+    {
+      Document doc = new Document();
+      doc.add(newField(
+          "field",
+          "The quick brown fox jumps over the lazy dog",
+              TextField.TYPE_STORED));
+      writer.addDocument(doc);
+    }
+    
+    {
+      Document doc = new Document();
+      doc.add(newField(
+          "field",
+          "The quick brown duck jumps over the lazy dog with the quick brown fox jumps",
+              TextField.TYPE_STORED));
+      writer.addDocument(doc);
+    }
+  }
+  
+  public void testBrouwerianBooleanQuery() throws IOException {
+
+    Query query = new OrderedNearQuery(2, makeTermQuery("the"),
+                                        makeTermQuery("quick"), makeTermQuery("jumps"));
+    Query sub = makeTermQuery("fox");
+    NonOverlappingQuery q = new NonOverlappingQuery(query, sub);
+
+    checkIntervals(q, searcher, new int[][]{
+        { 1, 0, 4 }
+    });
+  }
+
+  public void testBrouwerianBooleanQueryExcludedDoesNotExist() throws IOException {
+
+    Query query = new OrderedNearQuery(2, makeTermQuery("the"),
+        makeTermQuery("quick"), makeTermQuery("jumps"));
+    Query sub = makeTermQuery("blox");
+    NonOverlappingQuery q = new NonOverlappingQuery(query, sub);
+
+    checkIntervals(q, searcher, new int[][]{
+        { 0, 0, 4 },
+        { 1, 0, 4, 10, 14 }
+    });
+  }
+
+  public void testBrouwerianOverlapQuery() throws IOException {
+    // We want to find 'jumps NOT WITHIN 2 positions of fox'
+    Query sub = new UnorderedNearQuery(2, makeTermQuery("jumps"), makeTermQuery("fox"));
+    Query query = makeTermQuery("jumps");
+    NonOverlappingQuery q = new NonOverlappingQuery(query, sub);
+
+    checkIntervals(q, searcher, new int[][]{
+        { 1, 4, 4 }
+    });
+  }
+
+  public void testBrouwerianNonExistentOverlapQuery() throws IOException {
+    Query sub = new UnorderedNearQuery(2, makeTermQuery("dog"), makeTermQuery("over"));
+    Query query = makeTermQuery("dog");
+    NonOverlappingQuery q = new NonOverlappingQuery(query, sub);
+
+    checkIntervals(q, searcher, new int[][]{});
+  }
+
+  public void testBrouwerianExistentOverlapQuery() throws IOException {
+    Query sub = new UnorderedNearQuery(1, makeTermQuery("dog"), makeTermQuery("over"));
+    Query query = makeTermQuery("dog");
+    NonOverlappingQuery q = new NonOverlappingQuery(query, sub);
+
+    checkIntervals(q, searcher, new int[][]{
+        { 0, 8, 8 },
+        { 1, 8, 8 }
+    });
+  }
+
+}
diff --git a/lucene/core/src/test/org/apache/lucene/search/posfilter/TestIntervalScoring.java b/lucene/core/src/test/org/apache/lucene/search/posfilter/TestIntervalScoring.java
new file mode 100644
index 0000000..1659ac6
--- /dev/null
+++ b/lucene/core/src/test/org/apache/lucene/search/posfilter/TestIntervalScoring.java
@@ -0,0 +1,63 @@
+package org.apache.lucene.search.posfilter;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.TextField;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.RegexpQuery;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.search.posfilter.OrderedNearQuery;
+import org.junit.Assert;
+
+import java.io.IOException;
+
+public class TestIntervalScoring extends IntervalTestBase {
+
+  @Override
+  protected void addDocs(RandomIndexWriter writer) throws IOException {
+    for (String content : docFields) {
+      Document doc = new Document();
+      doc.add(newField("field", content, TextField.TYPE_NOT_STORED));
+      writer.addDocument(doc);
+    }
+  }
+
+  private String[] docFields = {
+      "Should we, could we, would we?",
+      "It should -  would it?",
+      "It shouldn't",
+      "Should we, should we, should we"
+  };
+
+  public void testOrderedNearQueryScoring() throws IOException {
+    OrderedNearQuery q = new OrderedNearQuery(10, makeTermQuery("should"),
+                                                  makeTermQuery("would"));
+    checkScores(q, searcher, 1, 0);
+  }
+
+  public void testEmptyMultiTermQueryScoring() throws IOException {
+    OrderedNearQuery q = new OrderedNearQuery(10, new RegexpQuery(new Term("field", "bar.*")),
+                                                  new RegexpQuery(new Term("field", "foo.*")));
+    TopDocs docs = searcher.search(q, 10);
+    assertEquals(docs.totalHits, 0);
+  }
+
+
+}
diff --git a/lucene/core/src/test/org/apache/lucene/search/posfilter/TestMultiPhraseQuery2.java b/lucene/core/src/test/org/apache/lucene/search/posfilter/TestMultiPhraseQuery2.java
new file mode 100644
index 0000000..663db0f
--- /dev/null
+++ b/lucene/core/src/test/org/apache/lucene/search/posfilter/TestMultiPhraseQuery2.java
@@ -0,0 +1,570 @@
+package org.apache.lucene.search.posfilter;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.MultiFields;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.search.BooleanClause;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.CollectionStatistics;
+import org.apache.lucene.search.Explanation;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.TermStatistics;
+import org.apache.lucene.search.Weight;
+import org.apache.lucene.search.similarities.DefaultSimilarity;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.LuceneTestCase;
+
+import java.io.IOException;
+import java.util.LinkedList;
+
+/**
+ * This class tests the PhraseQuery2 class.
+ *
+ *
+ */
+public class TestMultiPhraseQuery2 extends LuceneTestCase {
+
+  public void testPhrasePrefix() throws IOException {
+    Directory indexStore = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random(), indexStore);
+    add("blueberry pie", writer);
+    add("blueberry strudel", writer);
+    add("blueberry pizza", writer);
+    add("blueberry chewing gum", writer);
+    add("bluebird pizza", writer);
+    add("bluebird foobar pizza", writer);
+    add("piccadilly circus", writer);
+
+    IndexReader reader = writer.getReader();
+    IndexSearcher searcher = newSearcher(reader);
+
+    // search for "blueberry pi*":
+    PhraseQuery2 query1 = new PhraseQuery2();
+    // search for "strawberry pi*":
+    PhraseQuery2 query2 = new PhraseQuery2();
+    query1.add(new Term("body", "blueberry"));
+    query2.add(new Term("body", "strawberry"));
+
+    LinkedList<Term> termsWithPrefix = new LinkedList<Term>();
+
+    // this TermEnum gives "piccadilly", "pie" and "pizza".
+    String prefix = "pi";
+    TermsEnum te = MultiFields.getFields(reader).terms("body").iterator(null);
+    te.seekCeil(new BytesRef(prefix));
+    do {
+      String s = te.term().utf8ToString();
+      if (s.startsWith(prefix)) {
+        termsWithPrefix.add(new Term("body", s));
+      } else {
+        break;
+      }
+    } while (te.next() != null);
+
+    query1.addMultiTerm(termsWithPrefix.toArray(new Term[0]));
+    //assertEquals("body:\"blueberry (piccadilly pie pizza)\"", query1.toString());
+    query2.addMultiTerm(termsWithPrefix.toArray(new Term[0]));
+    //assertEquals("body:\"strawberry (piccadilly pie pizza)\"", query2.toString());
+
+    ScoreDoc[] result;
+    result = searcher.search(query1, null, 1000).scoreDocs;
+    assertEquals(2, result.length);
+    result = searcher.search(query2, null, 1000).scoreDocs;
+    assertEquals(0, result.length);
+
+    // search for "blue* pizza":
+    PhraseQuery2 query3 = new PhraseQuery2(1);
+    termsWithPrefix.clear();
+    prefix = "blue";
+    te.seekCeil(new BytesRef(prefix));
+
+    do {
+      if (te.term().utf8ToString().startsWith(prefix)) {
+        termsWithPrefix.add(new Term("body", te.term().utf8ToString()));
+      }
+    } while (te.next() != null);
+
+    query3.addMultiTerm(termsWithPrefix.toArray(new Term[0]));
+    query3.add(new Term("body", "pizza"));
+
+    result = searcher.search(query3, null, 1000).scoreDocs;
+    //assertEquals(2, result.length); // blueberry pizza, bluebird pizza
+    //assertEquals("body:\"(blueberry bluebird) pizza\"", query3.toString());
+
+    result = searcher.search(query3, null, 1000).scoreDocs;
+
+    // just make sure no exc:
+    searcher.explain(query3, 0);
+
+    assertEquals(3, result.length); // blueberry pizza, bluebird pizza, bluebird
+    // foobar pizza
+
+    PhraseQuery2 query4 = new PhraseQuery2();
+    try {
+      query4.add(new Term("field1", "foo"));
+      query4.add(new Term("field2", "foobar"));
+      fail();
+    } catch (IllegalArgumentException e) {
+      // okay, all terms must belong to the same field
+    }
+
+    writer.close();
+    reader.close();
+    indexStore.close();
+  }
+
+  // LUCENE-2580
+  public void testTall() throws IOException {
+    Directory indexStore = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random(), indexStore);
+    add("blueberry chocolate pie", writer);
+    add("blueberry chocolate tart", writer);
+    IndexReader r = writer.getReader();
+    writer.close();
+
+    IndexSearcher searcher = newSearcher(r);
+    PhraseQuery2 q = new PhraseQuery2();
+    q.add(new Term("body", "blueberry"));
+    q.add(new Term("body", "chocolate"));
+    q.addMultiTerm(new Term[]{new Term("body", "pie"), new Term("body", "tart")});
+    assertEquals(2, searcher.search(q, 1).totalHits);
+    r.close();
+    indexStore.close();
+  }
+
+  //@Ignore //LUCENE-3821 fixes sloppy phrase scoring, except for this known problem
+  public void testMultiSloppyWithRepeats() throws IOException {
+    Directory indexStore = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random(), indexStore);
+    add("a b c d e f g h i k", writer);
+    IndexReader r = writer.getReader();
+    writer.close();
+
+    IndexSearcher searcher = newSearcher(r);
+
+    PhraseQuery2 q = new PhraseQuery2(6);
+    // this will fail, when the scorer would propagate [a] rather than [a,b],
+    q.addMultiTerm(new Term[]{new Term("body", "a"), new Term("body", "b")});
+    q.addMultiTerm(new Term[]{new Term("body", "a")});
+    assertEquals(1, searcher.search(q, 1).totalHits); // should match on "a b"
+
+    r.close();
+    indexStore.close();
+  }
+
+  /*
+  public void testMultiExactWithRepeats() throws IOException {
+    Directory indexStore = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random(), indexStore);
+    add("a b c d e f g h i k", writer);
+    IndexReader r = writer.getReader();
+    writer.close();
+
+    IndexSearcher searcher = newSearcher(r);
+    PhraseQuery2 q = new PhraseQuery2();
+    q.addMultiTerm(new Term[]{new Term("body", "a"), new Term("body", "d")}, 0);
+    q.addMultiTerm(new Term[]{new Term("body", "a"), new Term("body", "f")}, 2);
+    assertEquals(1, searcher.search(q, 1).totalHits); // should match on "a b"
+    r.close();
+    indexStore.close();
+  }
+  */
+
+  private void add(String s, RandomIndexWriter writer) throws IOException {
+    Document doc = new Document();
+    doc.add(newTextField("body", s, Field.Store.YES));
+    writer.addDocument(doc);
+  }
+
+  public void testBooleanQueryContainingSingleTermPrefixQuery()
+      throws IOException {
+    // this tests against bug 33161 (now fixed)
+    // In order to cause the bug, the outer query must have more than one term
+    // and all terms required.
+    // The contained PhraseMultiQuery must contain exactly one term array.
+    Directory indexStore = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random(), indexStore);
+    add("blueberry pie", writer);
+    add("blueberry chewing gum", writer);
+    add("blue raspberry pie", writer);
+
+    IndexReader reader = writer.getReader();
+    IndexSearcher searcher = newSearcher(reader);
+    // This query will be equivalent to +body:pie +body:"blue*"
+    BooleanQuery q = new BooleanQuery();
+    q.add(new TermQuery(new Term("body", "pie")), BooleanClause.Occur.MUST);
+
+    PhraseQuery2 trouble = new PhraseQuery2();
+    trouble.addMultiTerm(new Term[]{new Term("body", "blueberry"),
+        new Term("body", "blue")});
+    q.add(trouble, BooleanClause.Occur.MUST);
+
+    // exception will be thrown here without fix
+    ScoreDoc[] hits = searcher.search(q, null, 1000).scoreDocs;
+
+    assertEquals("Wrong number of hits", 2, hits.length);
+
+    // just make sure no exc:
+    searcher.explain(q, 0);
+
+    writer.close();
+    reader.close();
+    indexStore.close();
+  }
+
+  public void testPhrasePrefixWithBooleanQuery() throws IOException {
+    Directory indexStore = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random(), indexStore);
+    add("This is a test", "object", writer);
+    add("a note", "note", writer);
+
+    IndexReader reader = writer.getReader();
+    IndexSearcher searcher = newSearcher(reader);
+
+    // This query will be equivalent to +type:note +body:"a t*"
+    BooleanQuery q = new BooleanQuery();
+    q.add(new TermQuery(new Term("type", "note")), BooleanClause.Occur.MUST);
+
+    PhraseQuery2 trouble = new PhraseQuery2();
+    trouble.add(new Term("body", "a"));
+    trouble
+        .addMultiTerm(new Term[]{new Term("body", "test"), new Term("body", "this")});
+    q.add(trouble, BooleanClause.Occur.MUST);
+
+    // exception will be thrown here without fix for #35626:
+    ScoreDoc[] hits = searcher.search(q, null, 1000).scoreDocs;
+    assertEquals("Wrong number of hits", 0, hits.length);
+    writer.close();
+    reader.close();
+    indexStore.close();
+  }
+
+  public void testNoDocs() throws Exception {
+    Directory indexStore = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random(), indexStore);
+    add("a note", "note", writer);
+
+    IndexReader reader = writer.getReader();
+    IndexSearcher searcher = newSearcher(reader);
+
+    PhraseQuery2 q = new PhraseQuery2();
+    q.add(new Term("body", "a"));
+    q.addMultiTerm(new Term[]{new Term("body", "nope"), new Term("body", "nope")});
+    assertEquals("Wrong number of hits", 0,
+        searcher.search(q, null, 1).totalHits);
+
+    // just make sure no exc:
+    searcher.explain(q, 0);
+
+    writer.close();
+    reader.close();
+    indexStore.close();
+  }
+
+  public void testHashCodeAndEquals() {
+    PhraseQuery2 query1 = new PhraseQuery2();
+    PhraseQuery2 query2 = new PhraseQuery2();
+
+    assertEquals(query1.hashCode(), query2.hashCode());
+    assertEquals(query1, query2);
+
+    Term term1 = new Term("someField", "someText");
+
+    query1.add(term1);
+    query2.add(term1);
+
+    assertEquals(query1.hashCode(), query2.hashCode());
+    assertEquals(query1, query2);
+
+    Term term2 = new Term("someField", "someMoreText");
+
+    query1.add(term2);
+
+    assertFalse(query1.hashCode() == query2.hashCode());
+    assertFalse(query1.equals(query2));
+
+    query2.add(term2);
+
+    assertEquals(query1.hashCode(), query2.hashCode());
+    assertEquals(query1, query2);
+  }
+
+  private void add(String s, String type, RandomIndexWriter writer)
+      throws IOException {
+    Document doc = new Document();
+    doc.add(newTextField("body", s, Field.Store.YES));
+    doc.add(newStringField("type", type, Field.Store.NO));
+    writer.addDocument(doc);
+  }
+
+  // LUCENE-2526
+  public void testEmptyToString() {
+    new PhraseQuery2().toString();
+  }
+
+  public void testCustomIDF() throws Exception {
+    Directory indexStore = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random(), indexStore);
+    add("This is a test", "object", writer);
+    add("a note", "note", writer);
+
+    IndexReader reader = writer.getReader();
+    IndexSearcher searcher = newSearcher(reader);
+    searcher.setSimilarity(new DefaultSimilarity() {
+      @Override
+      public Explanation idfExplain(CollectionStatistics collectionStats, TermStatistics termStats[]) {
+        return new Explanation(10f, "just a test");
+      }
+    });
+
+    PhraseQuery2 query = new PhraseQuery2();
+    query.addMultiTerm(new Term[]{new Term("body", "this"), new Term("body", "that")});
+    query.add(new Term("body", "is"));
+    Weight weight = query.createWeight(searcher);
+    assertEquals(10f * 10f, weight.getValueForNormalization(), 0.001f);
+
+    writer.close();
+    reader.close();
+    indexStore.close();
+  }
+
+  /*
+
+  public void testZeroPosIncr() throws IOException {
+    Directory dir = new RAMDirectory();
+    final Token[] tokens = new Token[3];
+    tokens[0] = new Token();
+    tokens[0].append("a");
+    tokens[0].setPositionIncrement(1);
+    tokens[1] = new Token();
+    tokens[1].append("b");
+    tokens[1].setPositionIncrement(0);
+    tokens[2] = new Token();
+    tokens[2].append("c");
+    tokens[2].setPositionIncrement(0);
+
+    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
+    Document doc = new Document();
+    doc.add(new TextField("field", new CannedTokenStream(tokens)));
+    writer.addDocument(doc);
+    doc = new Document();
+    doc.add(new TextField("field", new CannedTokenStream(tokens)));
+    writer.addDocument(doc);
+    IndexReader r = writer.getReader();
+    writer.close();
+    IndexSearcher s = new IndexSearcher(r);
+    PhraseQuery2 mpq = new PhraseQuery2();
+    //mpq.setSlop(1);
+
+    // NOTE: not great that if we do the else clause here we
+    // get different scores!  PhraseQuery2 counts that
+    // phrase as occurring twice per doc (it should be 1, I
+    // think?).  This is because MultipleTermPositions is able to
+    // return the same position more than once (0, in this
+    // case):
+    if (true) {
+      mpq.add(new Term[] {new Term("field", "b"), new Term("field", "c")}, 0);
+      mpq.add(new Term[] {new Term("field", "a")}, 0);
+    } else {
+      mpq.add(new Term[] {new Term("field", "a")}, 0);
+      mpq.add(new Term[] {new Term("field", "b"), new Term("field", "c")}, 0);
+    }
+    TopDocs hits = s.search(mpq, 2);
+    assertEquals(2, hits.totalHits);
+    assertEquals(hits.scoreDocs[0].score, hits.scoreDocs[1].score, 1e-5);
+    /*
+    for(int hit=0;hit<hits.totalHits;hit++) {
+      ScoreDoc sd = hits.scoreDocs[hit];
+      System.out.println("  hit doc=" + sd.doc + " score=" + sd.score);
+    }
+    *//*
+    r.close();
+    dir.close();
+  }
+
+  private static Token makeToken(String text, int posIncr) {
+    final Token t = new Token();
+    t.append(text);
+    t.setPositionIncrement(posIncr);
+    return t;
+  }
+
+  private final static Token[] INCR_0_DOC_TOKENS = new Token[] {
+      makeToken("x", 1),
+      makeToken("a", 1),
+      makeToken("1", 0),
+      makeToken("m", 1),  // not existing, relying on slop=2
+      makeToken("b", 1),
+      makeToken("1", 0),
+      makeToken("n", 1), // not existing, relying on slop=2
+      makeToken("c", 1),
+      makeToken("y", 1)
+  };
+
+  private final static Token[] INCR_0_QUERY_TOKENS_AND = new Token[] {
+      makeToken("a", 1),
+      makeToken("1", 0),
+      makeToken("b", 1),
+      makeToken("1", 0),
+      makeToken("c", 1)
+  };
+
+  private final static Token[][] INCR_0_QUERY_TOKENS_AND_OR_MATCH = new Token[][] {
+      { makeToken("a", 1) },
+      { makeToken("x", 1), makeToken("1", 0) },
+      { makeToken("b", 2) },
+      { makeToken("x", 2), makeToken("1", 0) },
+      { makeToken("c", 3) }
+  };
+
+  private final static Token[][] INCR_0_QUERY_TOKENS_AND_OR_NO_MATCHN = new Token[][] {
+      { makeToken("x", 1) },
+      { makeToken("a", 1), makeToken("1", 0) },
+      { makeToken("x", 2) },
+      { makeToken("b", 2), makeToken("1", 0) },
+      { makeToken("c", 3) }
+  };
+
+  /**
+   * using query parser, MPQ will be created, and will not be strict about having all query terms 
+   * in each position - one of each position is sufficient (OR logic)
+   *//*
+  public void testZeroPosIncrSloppyParsedAnd() throws IOException {
+    PhraseQuery2 q = new PhraseQuery2();
+    q.add(new Term[]{ new Term("field", "a"), new Term("field", "1") }, -1);
+    q.add(new Term[]{ new Term("field", "b"), new Term("field", "1") }, 0);
+    q.add(new Term[]{ new Term("field", "c") }, 1);
+    doTestZeroPosIncrSloppy(q, 0);
+    q.setSlop(1);
+    doTestZeroPosIncrSloppy(q, 0);
+    q.setSlop(2);
+    doTestZeroPosIncrSloppy(q, 1);
+  }
+
+  private void doTestZeroPosIncrSloppy(Query q, int nExpected) throws IOException {
+    Directory dir = newDirectory(); // random dir
+    IndexWriterConfig cfg = newIndexWriterConfig(TEST_VERSION_CURRENT, null);
+    IndexWriter writer = new IndexWriter(dir, cfg);
+    Document doc = new Document();
+    doc.add(new TextField("field", new CannedTokenStream(INCR_0_DOC_TOKENS)));
+    writer.addDocument(doc);
+    IndexReader r = DirectoryReader.open(writer,false);
+    writer.close();
+    IndexSearcher s = new IndexSearcher(r);
+
+    if (VERBOSE) {
+      System.out.println("QUERY=" + q);
+    }
+
+    TopDocs hits = s.search(q, 1);
+    assertEquals("wrong number of results", nExpected, hits.totalHits);
+
+    if (VERBOSE) {
+      for(int hit=0;hit<hits.totalHits;hit++) {
+        ScoreDoc sd = hits.scoreDocs[hit];
+        System.out.println("  hit doc=" + sd.doc + " score=" + sd.score);
+      }
+    }
+
+    r.close();
+    dir.close();
+  }
+
+  /**
+   * PQ AND Mode - Manually creating a phrase query
+   *//*
+  public void testZeroPosIncrSloppyPqAnd() throws IOException {
+    final PhraseQuery pq = new PhraseQuery();
+    int pos = -1;
+    for (Token tap : INCR_0_QUERY_TOKENS_AND) {
+      pos += tap.getPositionIncrement();
+      pq.add(new Term("field",tap.toString()), pos);
+    }
+    doTestZeroPosIncrSloppy(pq, 0);
+    pq.setSlop(1);
+    doTestZeroPosIncrSloppy(pq, 0);
+    pq.setSlop(2);
+    doTestZeroPosIncrSloppy(pq, 1);
+  }
+
+  /**
+   * MPQ AND Mode - Manually creating a multiple phrase query
+   *//*
+  public void testZeroPosIncrSloppyMpqAnd() throws IOException {
+    final PhraseQuery2 mpq = new PhraseQuery2();
+    int pos = -1;
+    for (Token tap : INCR_0_QUERY_TOKENS_AND) {
+      pos += tap.getPositionIncrement();
+      mpq.add(new Term[]{new Term("field",tap.toString())}, pos); //AND logic
+    }
+    doTestZeroPosIncrSloppy(mpq, 0);
+    mpq.setSlop(1);
+    doTestZeroPosIncrSloppy(mpq, 0);
+    mpq.setSlop(2);
+    doTestZeroPosIncrSloppy(mpq, 1);
+  }
+
+  /**
+   * MPQ Combined AND OR Mode - Manually creating a multiple phrase query
+   *//*
+  public void testZeroPosIncrSloppyMpqAndOrMatch() throws IOException {
+    final PhraseQuery2 mpq = new PhraseQuery2();
+    for (Token tap[] : INCR_0_QUERY_TOKENS_AND_OR_MATCH) {
+      Term[] terms = tapTerms(tap);
+      final int pos = tap[0].getPositionIncrement()-1;
+      mpq.add(terms, pos); //AND logic in pos, OR across lines 
+    }
+    doTestZeroPosIncrSloppy(mpq, 0);
+    mpq.setSlop(1);
+    doTestZeroPosIncrSloppy(mpq, 0);
+    mpq.setSlop(2);
+    doTestZeroPosIncrSloppy(mpq, 1);
+  }
+
+  /**
+   * MPQ Combined AND OR Mode - Manually creating a multiple phrase query - with no match
+   *//*
+  public void testZeroPosIncrSloppyMpqAndOrNoMatch() throws IOException {
+    final PhraseQuery2 mpq = new PhraseQuery2();
+    for (Token tap[] : INCR_0_QUERY_TOKENS_AND_OR_NO_MATCHN) {
+      Term[] terms = tapTerms(tap);
+      final int pos = tap[0].getPositionIncrement()-1;
+      mpq.add(terms, pos); //AND logic in pos, OR across lines 
+    }
+    doTestZeroPosIncrSloppy(mpq, 0);
+    mpq.setSlop(2);
+    doTestZeroPosIncrSloppy(mpq, 0);
+  }
+
+  private Term[] tapTerms(Token[] tap) {
+    Term[] terms = new Term[tap.length];
+    for (int i=0; i<terms.length; i++) {
+      terms[i] = new Term("field",tap[i].toString());
+    }
+    return terms;
+  }
+*/
+}
diff --git a/lucene/core/src/test/org/apache/lucene/search/posfilter/TestNestedPositionFilterQueries.java b/lucene/core/src/test/org/apache/lucene/search/posfilter/TestNestedPositionFilterQueries.java
new file mode 100644
index 0000000..c93ad5b
--- /dev/null
+++ b/lucene/core/src/test/org/apache/lucene/search/posfilter/TestNestedPositionFilterQueries.java
@@ -0,0 +1,122 @@
+package org.apache.lucene.search.posfilter;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.TextField;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.search.BooleanClause;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.Query;
+
+import java.io.IOException;
+
+public class TestNestedPositionFilterQueries extends IntervalTestBase {
+
+  @Override
+  protected void addDocs(RandomIndexWriter writer) throws IOException {
+    for (int i = 0; i < docFields.length; i++) {
+      Document doc = new Document();
+      doc.add(newField("field", docFields[i], TextField.TYPE_STORED));
+      writer.addDocument(doc);
+    }
+  }
+
+  private String[] docFields = {
+    "w1 w2 w3 w4 w5 w6 w7 w8 w9 w10 w11 w12", //0
+    "w1 w3 w4 w5 w6 w7 w8", //1
+    "w1 w3 w10 w4 w5 w6 w7 w8", //2
+    "w1 w3 w2 w4 w5 w6 w7 w8", //3
+  };
+
+  public void testOrderedDisjunctionQueries() throws IOException {
+    // Two phrases whose subparts appear in a document, but that do not fulfil the slop
+    // requirements of the parent IntervalFilterQuery
+    Query sentence1 = new OrderedNearQuery(0, makeTermQuery("w1"), makeTermQuery("w8"), makeTermQuery("w4"));
+    Query sentence2 = new OrderedNearQuery(0, makeTermQuery("w3"), makeTermQuery("w7"), makeTermQuery("w6"));
+    BooleanQuery bq = new BooleanQuery();
+    bq.add(sentence1, BooleanClause.Occur.SHOULD);
+    bq.add(sentence2, BooleanClause.Occur.SHOULD);
+    checkIntervals(bq, searcher, new int[][]{});
+  }
+
+  public void testFilterDisjunctionQuery() throws IOException {
+    Query near1 = makeTermQuery("w4");
+    Query near2 = new OrderedNearQuery(3, makeTermQuery("w1"), makeTermQuery("w10"));
+    BooleanQuery bq = new BooleanQuery();
+    bq.add(near1, BooleanClause.Occur.SHOULD);
+    bq.add(near2, BooleanClause.Occur.SHOULD);
+    checkIntervals(bq, searcher, new int[][]{
+        { 0, 3, 3 },
+        { 1, 2, 2 },
+        { 2, 0, 2, 3, 3 },
+        { 3, 3, 3 }
+    });
+  }
+
+  // or(w1 pre/2 w2, w1 pre/3 w10)
+  public void testOrNearNearQuery() throws IOException {
+    Query near1 = new OrderedNearQuery(2, makeTermQuery("w1"), makeTermQuery("w2"));
+    Query near2 = new OrderedNearQuery(3, makeTermQuery("w1"), makeTermQuery("w10"));
+    BooleanQuery bq = new BooleanQuery();
+    bq.add(near1, BooleanClause.Occur.SHOULD);
+    bq.add(near2, BooleanClause.Occur.SHOULD);
+    checkIntervals(bq, searcher, new int[][]{
+        { 0, 0, 1 },
+        { 2, 0, 2 },
+        { 3, 0, 2 }
+    });
+  }
+
+  // or(w2 within/2 w1, w10 within/3 w1)
+  public void testUnorderedNearNearQuery() throws IOException {
+    Query near1 = new UnorderedNearQuery(2, makeTermQuery("w2"), makeTermQuery("w1"));
+    Query near2 = new UnorderedNearQuery(3, makeTermQuery("w10"), makeTermQuery("w1"));
+    BooleanQuery bq = new BooleanQuery();
+    bq.add(near1, BooleanClause.Occur.SHOULD);
+    bq.add(near2, BooleanClause.Occur.SHOULD);
+    checkIntervals(bq, searcher, new int[][]{
+        {0, 0, 1},
+        {2, 0, 2},
+        {3, 0, 2}
+    });
+  }
+
+  // (a pre/2 b) pre/6 (c pre/2 d)
+  public void testNearNearNearQuery() throws IOException {
+    Query near1 = new OrderedNearQuery(2, makeTermQuery("w1"), makeTermQuery("w4"));
+    Query near2 = new OrderedNearQuery(2, makeTermQuery("w10"), makeTermQuery("w12"));
+    Query near3 = new OrderedNearQuery(6, near1, near2);
+    checkIntervals(near3, searcher, new int[][]{
+        { 0, 0, 11 }
+    });
+  }
+
+  public void testOrNearNearNonExistentQuery() throws IOException {
+    Query near1 = new OrderedNearQuery(2, makeTermQuery("w1"), makeTermQuery("w12"));
+    Query near2 = new OrderedNearQuery(2, makeTermQuery("w3"), makeTermQuery("w8"));
+    BooleanQuery bq = new BooleanQuery();
+    bq.add(near1, BooleanClause.Occur.SHOULD);
+    bq.add(near2, BooleanClause.Occur.SHOULD);
+    BooleanQuery wrapper = new BooleanQuery();
+    wrapper.add(bq, BooleanClause.Occur.MUST);
+    wrapper.add(makeTermQuery("foo"), BooleanClause.Occur.MUST_NOT);
+    checkIntervals(wrapper, searcher, new int[][]{});
+  }
+
+}
diff --git a/lucene/core/src/test/org/apache/lucene/search/posfilter/TestPhraseQuery2.java b/lucene/core/src/test/org/apache/lucene/search/posfilter/TestPhraseQuery2.java
new file mode 100644
index 0000000..cf3f08ae
--- /dev/null
+++ b/lucene/core/src/test/org/apache/lucene/search/posfilter/TestPhraseQuery2.java
@@ -0,0 +1,167 @@
+package org.apache.lucene.search.posfilter;
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.TextField;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
+
+import java.io.IOException;
+
+public class TestPhraseQuery2 extends IntervalTestBase {
+  
+  protected void addDocs(RandomIndexWriter writer) throws IOException {
+    {
+      Document doc = new Document();
+      doc.add(newField(
+          "field",
+        //  0       1      2     3     4       5     6      7      8   9  10   11   12   13
+          "Pease porridge hot! Pease porridge cold! Pease porridge in the pot nine days old! "
+        //  14   15  16 17    18   19  20  21    22   23  24 25 26  27   28   29   30
+        + "Some like it hot, some like it cold, Some like it in the pot nine days old! "
+        //  31      32     33    34     35     36
+        + "Pease porridge hot! Pease porridge cold!",
+              TextField.TYPE_STORED));
+      writer.addDocument(doc);
+    }
+    
+    {
+      Document doc = new Document();
+      doc.add(newField(
+          "field",
+        //  0       1      2     3     4       5     6      7      8   9  10   11   12   13
+          "Pease porridge cold! Pease porridge hot! Pease porridge in the pot nine days old! "
+        //  14   15  16 17    18   19  20  21    22   23  24 25 26  27   28   29   30
+        + "Some like it cold, some like it hot, Some like it in the pot nine days old! "
+        //  31      32     33    34     35     36
+        + "Pease porridge cold! Pease porridge hot!",
+          TextField.TYPE_STORED));
+      writer.addDocument(doc);
+    }
+  }
+
+  public void testDuplicatedMultiPhraseQuery() throws Exception {
+    PhraseQuery2 query = new PhraseQuery2(6);
+    query.addMultiTerm(new Term("field", "pot"), new Term("field", "nine"));
+    query.addMultiTerm(new Term("field", "nine"));
+    checkIntervals(query, searcher, new int[][]{
+        { 0, 10, 11, 27, 28 },
+        { 1, 10, 11, 27, 28 }
+    });
+  }
+
+  public void testSloppyPhraseQuery() throws IOException {
+    PhraseQuery2 query = new PhraseQuery2(1);
+    query.add(new Term("field", "pease"));
+    query.add(new Term("field", "hot!"));
+    checkIntervals(query, searcher, new int[][]{
+        { 0, 0, 2, 31, 33 },
+        { 1, 3, 5, 34, 36 }
+    });
+  }
+
+  public void testManyTermSloppyPhraseQuery() throws IOException {
+    PhraseQuery2 query = new PhraseQuery2(2);
+    query.add(new Term("field", "pease"));
+    query.add(new Term("field", "porridge"));
+    query.add(new Term("field", "pot"));
+    checkIntervals(query, searcher, new int[][]{
+        { 0, 6, 10 },
+        { 1, 6, 10 }
+    });
+  }
+
+  public void testOutOfOrderSloppyPhraseQuery() throws IOException {
+    PhraseQuery2 query = new PhraseQuery2(1);
+    query.add(new Term("field", "pease"));
+    query.add(new Term("field", "cold!"));
+    query.add(new Term("field", "porridge"));
+    checkIntervals(query, searcher, new int[][]{
+        { 0, 3, 5, 34, 36 },
+        { 1, 0, 2, 31, 33 }
+    });
+  }
+
+  public void testMultiPhrases() throws IOException {
+
+    PhraseQuery2 q = new PhraseQuery2();
+    q.add(new Term("field", "pease"));
+    q.add(new Term("field", "porridge"));
+    q.addMultiTerm(new Term("field", "hot!"), new Term("field", "cold!"));
+
+    checkIntervals(q, searcher, new int[][]{
+        { 0, 0, 2, 3, 5, 31, 33, 34, 36 },
+        { 1, 0, 2, 3, 5, 31, 33, 34, 36 }
+    });
+  }
+
+  public void testOverlaps() throws IOException {
+    PhraseQuery2 q = new PhraseQuery2();
+    q.add(new Term("field", "some"));
+    q.add(new Term("field", "like"));
+    q.add(new Term("field", "it"));
+    q.add(new Term("field", "cold,"));
+    q.add(new Term("field", "some"));
+    q.add(new Term("field", "like"));
+    checkIntervals(q, searcher, new int[][]{
+        {0, 18, 23},
+        {1, 14, 19}
+    });
+  }
+
+  public void testMatching() throws IOException {
+
+    PhraseQuery2 q = new PhraseQuery2();
+    q.add(new Term("field", "pease"));
+    q.add(new Term("field", "porridge"));
+    q.add(new Term("field", "hot!"));
+
+    checkIntervals(q, searcher, new int[][]{
+        {0, 0, 2, 31, 33},
+        {1, 3, 5, 34, 36}
+    });
+
+  }
+
+  public void testPartialMatching() throws IOException {
+
+    PhraseQuery2 q = new PhraseQuery2();
+    q.add(new Term("field", "pease"));
+    q.add(new Term("field", "porridge"));
+    q.add(new Term("field", "hot!"));
+    q.add(new Term("field", "pease"));
+    q.add(new Term("field", "porridge"));
+    q.add(new Term("field", "cold!"));
+
+    checkIntervals(q, searcher, new int[][]{
+        {0, 0, 5, 31, 36},
+    });
+
+  }
+
+  public void testNonMatching() throws IOException {
+
+    PhraseQuery2 q = new PhraseQuery2();
+    q.add(new Term("field", "pease"));
+    q.add(new Term("field", "hot!"));
+
+    checkIntervals(q, searcher, new int[][]{});
+
+  }
+
+
+}
diff --git a/lucene/core/src/test/org/apache/lucene/search/posfilter/TestPositionFilteredIntervals.java b/lucene/core/src/test/org/apache/lucene/search/posfilter/TestPositionFilteredIntervals.java
new file mode 100644
index 0000000..dfdc430
--- /dev/null
+++ b/lucene/core/src/test/org/apache/lucene/search/posfilter/TestPositionFilteredIntervals.java
@@ -0,0 +1,226 @@
+package org.apache.lucene.search.posfilter;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.TextField;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.search.BooleanClause;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.posfilter.OrderedNearQuery;
+import org.apache.lucene.search.posfilter.UnorderedNearQuery;
+
+import java.io.IOException;
+
+public class TestPositionFilteredIntervals extends IntervalTestBase {
+
+  public static final String field = "field";
+
+  @Override
+  protected void addDocs(RandomIndexWriter writer) throws IOException {
+    for (String content : docFields) {
+      Document doc = new Document();
+      doc.add(newField(field, content, TextField.TYPE_NOT_STORED));
+      writer.addDocument(doc);
+    }
+  }
+
+  private String[] docFields = {
+      "w1 w2 w3 w4 w5", //0
+      "w1 w3 w2 w3",//1
+      "w1 xx w2 yy w3",//2
+      "w1 w3 xx w2 yy w3",//3
+      "u2 u2 u1", //4
+      "u2 xx u2 u1",//5
+      "u2 u2 xx u1", //6
+      "u2 xx u2 yy u1", //7
+      "u2 xx u1 u2",//8
+      "u1 u2 xx u2",//9
+      "u2 u1 xx u2",//10
+      "t1 t2 t1 t3 t2 t3",//11
+      "v1 v2 v3",//12
+      "v1 v3 v2 v3 v4",//13
+      "v4 v2 v2 v4",//14
+      "v3 v4 v3"};//15
+
+  public void testNearOrdered01() throws Exception {
+    Query q = new OrderedNearQuery(0, makeTermQuery("w1"), makeTermQuery("w2"), makeTermQuery("w3"));
+    checkIntervals(q, searcher, new int[][]{
+        { 0, 0, 2 }
+    });
+  }
+
+  public void testNearOrdered02() throws Exception {
+    Query q = new OrderedNearQuery(1, makeTermQuery("w1"), makeTermQuery("w2"), makeTermQuery("w3"));
+    checkIntervals(q, searcher, new int[][]{
+        { 0, 0, 2 },
+        { 1, 0, 3 }
+    });
+  }
+
+  public void testNearOrdered03() throws Exception {
+    Query q = new OrderedNearQuery(2, makeTermQuery("w1"), makeTermQuery("w2"), makeTermQuery("w3"));
+    checkIntervals(q, searcher, new int[][]{
+        { 0, 0, 2 },
+        { 1, 0, 3 },
+        { 2, 0, 4 }
+    });
+  }
+
+  public void testNearOrdered04() throws Exception {
+    Query q = new OrderedNearQuery(3, makeTermQuery("w1"), makeTermQuery("w2"), makeTermQuery("w3"));
+    checkIntervals(q, searcher, new int[][]{
+        { 0, 0, 2 },
+        { 1, 0, 3 },
+        { 2, 0, 4 },
+        { 3, 0, 5 }
+    });
+  }
+
+  public void testNearOrdered05() throws Exception {
+    Query q = new OrderedNearQuery(4, makeTermQuery("w1"), makeTermQuery("w2"), makeTermQuery("w3"));
+    checkIntervals(q, searcher, new int[][]{
+        { 0, 0, 2 },
+        { 1, 0, 3 },
+        { 2, 0, 4 },
+        { 3, 0, 5 }
+    });
+  }
+
+  public void testNearOrderedEqual01() throws Exception {
+    Query q = new OrderedNearQuery(0, makeTermQuery("w1"), makeTermQuery("w3"), makeTermQuery("w3"));
+    checkIntervals(q, searcher, new int[][]{});
+  }
+
+  public void testNearOrderedEqual02() throws Exception {
+    Query q = new OrderedNearQuery(1, makeTermQuery("w1"), makeTermQuery("w3"), makeTermQuery("w3"));
+    checkIntervals(q, searcher, new int[][]{
+        { 1, 0, 3 }
+    });
+  }
+
+  public void testNearOrderedEqual03() throws Exception {
+    Query q = new OrderedNearQuery(2, makeTermQuery("w1"), makeTermQuery("w3"), makeTermQuery("w3"));
+    checkIntervals(q, searcher, new int[][]{
+        { 1, 0, 3 }
+    });
+  }
+
+  public void testNearOrderedEqual04() throws Exception {
+    Query q = new OrderedNearQuery(3, makeTermQuery("w1"), makeTermQuery("w3"), makeTermQuery("w3"));
+    checkIntervals(q, searcher, new int[][]{
+        { 1, 0, 3 },
+        { 3, 0, 5 }
+    });
+  }
+
+  public void testNearOrderedEqual11() throws Exception {
+    Query q = new OrderedNearQuery(0, makeTermQuery("u2"), makeTermQuery("u2"), makeTermQuery("u1"));
+    checkIntervals(q, searcher, new int[][]{
+        { 4, 0, 2 }
+    });
+  }
+
+  public void testNearOrderedEqual13() throws Exception {
+    Query q = new OrderedNearQuery(1, makeTermQuery("u2"), makeTermQuery("u2"), makeTermQuery("u1"));
+    checkIntervals(q, searcher, new int[][]{
+        { 4, 0, 2 },
+        { 5, 0, 3 },
+        { 6, 0, 3 }
+    });
+  }
+
+  public void testNearOrderedEqual14() throws Exception {
+    Query q = new OrderedNearQuery(2, makeTermQuery("u2"), makeTermQuery("u2"), makeTermQuery("u1"));
+    checkIntervals(q, searcher, new int[][]{
+        { 4, 0, 2 },
+        { 5, 0, 3 },
+        { 6, 0, 3 },
+        { 7, 0, 4 }
+    });
+  }
+
+  public void testNearOrderedEqual15() throws Exception {
+    Query q = new OrderedNearQuery(3, makeTermQuery("u2"), makeTermQuery("u2"), makeTermQuery("u1"));
+    checkIntervals(q, searcher, new int[][]{
+        { 4, 0, 2 },
+        { 5, 0, 3 },
+        { 6, 0, 3 },
+        { 7, 0, 4 }
+    });
+  }
+
+  public void testNearOrderedOverlap() throws Exception {
+    Query q = new OrderedNearQuery(3, makeTermQuery("t1"), makeTermQuery("t2"), makeTermQuery("t3"));
+    checkIntervals(q, searcher, new int[][]{
+        { 11, 0, 3, 2, 5 }
+    });
+  }
+
+  public void testNearUnordered() throws Exception {
+    Query q = new UnorderedNearQuery(0, makeTermQuery("u1"), makeTermQuery("u2"));
+    checkIntervals(q, searcher, new int[][]{
+        { 4, 1, 2 },
+        { 5, 2, 3 },
+        { 8, 2, 3 },
+        { 9, 0, 1 },
+        { 10, 0, 1 }
+    });
+  }
+
+  public void testMultipleNearUnordered() throws Exception {
+    Query q = new UnorderedNearQuery(1, makeTermQuery("w1"), makeTermQuery("w2"), makeTermQuery("w3"));
+    checkIntervals(q, searcher, new int[][]{
+        { 0, 0, 2 },
+        { 1, 0, 2 },
+        { 3, 0, 3 }
+    });
+  }
+  /*
+        "w1 w2 w3 w4 w5", //0
+      "w1 w3 w2 w3",//1
+      "w1 xx w2 yy w3",//2
+      "w1 w3 xx w2 yy w3",//3
+      "u2 u2 u1", //4
+      "u2 xx u2 u1",//5
+      "u2 u2 xx u1", //6
+      "u2 xx u2 yy u1", //7
+      "u2 xx u1 u2",//8
+      "u1 u2 xx u2",//9
+      "u2 u1 xx u2",//10
+      "t1 t2 t1 t3 t2 t3"};//11
+   */
+
+  // ((u1 near u2) and xx)
+  public void testNestedNear() throws Exception {
+
+    Query q = new UnorderedNearQuery(0, makeTermQuery("u1"), makeTermQuery("u2"));
+    BooleanQuery topq = new BooleanQuery();
+    topq.add(q, BooleanClause.Occur.MUST);
+    topq.add(makeTermQuery("xx"), BooleanClause.Occur.MUST);
+
+    checkIntervals(topq, searcher, new int[][]{
+        { 5, 1, 1, 2, 3 },
+        { 8, 1, 1, 2, 3 },
+        { 9, 0, 1, 2, 2 },
+        { 10, 0, 1, 2, 2 }
+    });
+
+  }
+}
diff --git a/lucene/core/src/test/org/apache/lucene/search/posfilter/TestPositionsAndOffsets.java b/lucene/core/src/test/org/apache/lucene/search/posfilter/TestPositionsAndOffsets.java
new file mode 100644
index 0000000..d75663b5
--- /dev/null
+++ b/lucene/core/src/test/org/apache/lucene/search/posfilter/TestPositionsAndOffsets.java
@@ -0,0 +1,68 @@
+package org.apache.lucene.search.posfilter;
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.document.TextField;
+import org.apache.lucene.index.FieldInfo;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.BooleanClause;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
+
+import java.io.IOException;
+
+// We need to store offsets here, so don't use the following Codecs, which don't
+// support them.
+@SuppressCodecs({"MockFixedIntBlock", "MockVariableIntBlock", "MockSep", "MockRandom"})
+public class TestPositionsAndOffsets extends IntervalTestBase {
+
+  protected void addDocs(RandomIndexWriter writer) throws IOException {
+    FieldType fieldType = new FieldType(TextField.TYPE_NOT_STORED);
+    fieldType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+    Document doc = new Document();
+    doc.add(newField(
+        "field",
+        "Pease porridge hot! Pease porridge cold! Pease porridge in the pot nine days old! Some like it hot, some"
+            + " like it cold, Some like it in the pot nine days old! Pease porridge hot! Pease porridge cold!",
+        fieldType));
+    writer.addDocument(doc);
+  }
+
+  public void testTermQueryOffsets() throws IOException {
+    Query query = new TermQuery(new Term("field", "porridge"));
+    checkIntervalOffsets(query, searcher, new int[][]{
+        { 0, 6, 14, 26, 34, 47, 55, 164, 172, 184, 192 }
+    });
+  }
+
+  public void testBooleanQueryOffsets() throws IOException {
+    BooleanQuery query = new BooleanQuery();
+    query.add(new BooleanClause(new TermQuery(new Term("field", "porridge")),
+        BooleanClause.Occur.MUST));
+    query.add(new BooleanClause(new TermQuery(new Term("field", "nine")),
+        BooleanClause.Occur.MUST));
+    checkIntervalOffsets(query,  searcher, new int[][]{
+        { 0, 6, 14, 26, 34, 47, 55, 67, 71, 143, 147, 164, 172, 184, 192 }
+    });
+  }
+
+}
\ No newline at end of file
diff --git a/lucene/core/src/test/org/apache/lucene/search/posfilter/TestRangeFilterQuery.java b/lucene/core/src/test/org/apache/lucene/search/posfilter/TestRangeFilterQuery.java
new file mode 100644
index 0000000..4b5dde0
--- /dev/null
+++ b/lucene/core/src/test/org/apache/lucene/search/posfilter/TestRangeFilterQuery.java
@@ -0,0 +1,72 @@
+package org.apache.lucene.search.posfilter;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.TextField;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.posfilter.OrderedNearQuery;
+import org.apache.lucene.search.posfilter.RangeFilterQuery;
+import org.junit.Test;
+
+import java.io.IOException;
+
+public class TestRangeFilterQuery extends IntervalTestBase {
+
+  @Override
+  protected void addDocs(RandomIndexWriter writer) throws IOException {
+    for (int i = 0; i < docFields.length; i++) {
+      Document doc = new Document();
+      doc.add(newField("field", docFields[i], TextField.TYPE_STORED));
+      writer.addDocument(doc);
+    }
+  }
+
+  private String[] docFields = {
+      "w1 w2 w3 w4 w5 w6 w7 w8 w9 w10 w11 w12", //0
+      "w1 w3 w4 w5 w6 w7 w8 w4", //1
+      "w1 w3 w10 w4 w5 w6 w7 w8", //2
+      "w1 w3 w2 w4 w10 w5 w6 w7 w8", //3
+  };
+
+  @Test
+  public void testSimpleTermRangeFilter() throws IOException {
+    Query q = new RangeFilterQuery(2, makeTermQuery("w4"));
+    checkIntervals(q, searcher, new int[][]{
+        { 1, 2, 2 }
+    });
+  }
+
+  @Test
+  public void testStartEndTermRangeFilter() throws IOException {
+    Query q = new RangeFilterQuery(2, 4, makeTermQuery("w3"));
+    checkIntervals(q, searcher, new int[][]{
+        { 0, 2, 2 }
+    });
+  }
+
+  public void testRangeFilteredPositionFilter() throws IOException {
+    Query q = new OrderedNearQuery(0, makeTermQuery("w4"), makeTermQuery("w5"));
+    q = new RangeFilterQuery(3, 10, q);
+    checkIntervals(q, searcher, new int[][]{
+        { 0, 3, 4 },
+        { 2, 3, 4 }
+    });
+  }
+}
diff --git a/lucene/core/src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java b/lucene/core/src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java
index 48a4b4f..e8ba500 100644
--- a/lucene/core/src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java
+++ b/lucene/core/src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java
@@ -30,6 +30,7 @@
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Weight;
 import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.Weight.PostingFeatures;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.LuceneTestCase;
 
@@ -167,7 +168,7 @@
     Weight w = searcher.createNormalizedWeight(q);
     IndexReaderContext topReaderContext = searcher.getTopReaderContext();
     AtomicReaderContext leave = topReaderContext.leaves().get(0);
-    Scorer s = w.scorer(leave, true, false, leave.reader().getLiveDocs());
+    Scorer s = w.scorer(leave, true, false, PostingFeatures.POSITIONS, leave.reader().getLiveDocs());
     assertEquals(1, s.advance(1));
   }
   
diff --git a/lucene/core/src/test/org/apache/lucene/search/spans/TestSpans.java b/lucene/core/src/test/org/apache/lucene/search/spans/TestSpans.java
index 30e8a9e..25ed50b 100644
--- a/lucene/core/src/test/org/apache/lucene/search/spans/TestSpans.java
+++ b/lucene/core/src/test/org/apache/lucene/search/spans/TestSpans.java
@@ -38,6 +38,7 @@
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.Scorer;
 import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.Weight.PostingFeatures;
 import org.apache.lucene.search.similarities.DefaultSimilarity;
 import org.apache.lucene.search.similarities.Similarity;
 import org.apache.lucene.store.Directory;
@@ -429,7 +430,7 @@
                                 slop,
                                 ordered);
   
-        spanScorer = searcher.createNormalizedWeight(snq).scorer(ctx, true, false, ctx.reader().getLiveDocs());
+        spanScorer = searcher.createNormalizedWeight(snq).scorer(ctx, true, false, PostingFeatures.POSITIONS, ctx.reader().getLiveDocs());
       } finally {
         searcher.setSimilarity(oldSim);
       }
diff --git a/lucene/facet/src/java/org/apache/lucene/facet/search/DrillSidewaysQuery.java b/lucene/facet/src/java/org/apache/lucene/facet/search/DrillSidewaysQuery.java
index eee0431..9cf1c0d 100644
--- a/lucene/facet/src/java/org/apache/lucene/facet/search/DrillSidewaysQuery.java
+++ b/lucene/facet/src/java/org/apache/lucene/facet/search/DrillSidewaysQuery.java
@@ -16,9 +16,6 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-import java.io.IOException;
-import java.util.Arrays;
-
 import org.apache.lucene.index.AtomicReader;
 import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.DocsEnum;
@@ -34,6 +31,9 @@
 import org.apache.lucene.search.Weight;
 import org.apache.lucene.util.Bits;
 
+import java.io.IOException;
+import java.util.Arrays;
+
 class DrillSidewaysQuery extends Query {
   final Query baseQuery;
   final Collector drillDownCollector;
@@ -103,7 +103,7 @@
 
       @Override
       public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder,
-                           boolean topScorer, Bits acceptDocs) throws IOException {
+                           boolean topScorer, PostingFeatures pf, Bits acceptDocs) throws IOException {
 
         DrillSidewaysScorer.DocsEnumsAndFreq[] dims = new DrillSidewaysScorer.DocsEnumsAndFreq[drillDownTerms.length];
         TermsEnum termsEnum = null;
@@ -149,7 +149,7 @@
 
         // TODO: it could be better if we take acceptDocs
         // into account instead of baseScorer?
-        Scorer baseScorer = baseWeight.scorer(context, scoreDocsInOrder, false, acceptDocs);
+        Scorer baseScorer = baseWeight.scorer(context, scoreDocsInOrder, false, pf, acceptDocs);
 
         if (baseScorer == null) {
           return null;
diff --git a/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/directory/ParallelTaxonomyArrays.java b/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/directory/ParallelTaxonomyArrays.java
new file mode 100644
index 0000000..5a5e5a4
--- /dev/null
+++ b/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/directory/ParallelTaxonomyArrays.java
@@ -0,0 +1,230 @@
+package org.apache.lucene.facet.taxonomy.directory;
+
+import org.apache.lucene.facet.taxonomy.TaxonomyReader;
+import org.apache.lucene.index.CorruptIndexException;
+import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.MultiFields;
+import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.util.ArrayUtil;
+
+import java.io.IOException;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Returns 3 arrays for traversing the taxonomy:
+ * <ul>
+ * <li>{@code parents}: {@code parents[i]} denotes the parent of category
+ * ordinal {@code i}.</li>
+ * <li>{@code children}: {@code children[i]} denotes the youngest child of
+ * category ordinal {@code i}. The youngest child is defined as the category
+ * that was added last to the taxonomy as an immediate child of {@code i}.</li>
+ * <li>{@code siblings}: {@code siblings[i]} denotes the sibling of category
+ * ordinal {@code i}. The sibling is defined as the previous youngest child of
+ * {@code parents[i]}.</li>
+ * </ul>
+ * 
+ * To traverse the taxonomy tree, you typically start with {@code children[0]}
+ * (ordinal 0 is reserved for ROOT), and then depends if you want to do DFS or
+ * BFS, you call {@code children[children[0]]} or {@code siblings[children[0]]}
+ * and so forth, respectively.
+ * 
+ * <p>
+ * <b>NOTE:</b> you are not expected to modify the values of the arrays, since
+ * the arrays are shared with other threads.
+ * 
+ * @lucene.experimental
+ */
+public class ParallelTaxonomyArrays {
+
+  private final int[] parents;
+
+  // the following two arrays are lazily intialized. note that we only keep a
+  // single boolean member as volatile, instead of declaring the arrays
+  // volatile. the code guarantees that only after the boolean is set to true,
+  // the arrays are returned.
+  private volatile boolean initializedChildren = false;
+  private int[] children, siblings;
+  
+  /** Used by {@link #add(int, int)} after the array grew. */
+  private ParallelTaxonomyArrays(int[] parents) {
+    this.parents = parents;
+  }
+
+  public ParallelTaxonomyArrays(IndexReader reader) throws IOException {
+    parents = new int[reader.maxDoc()];
+    if (parents.length > 0) {
+      initParents(reader, 0);
+      // Starting Lucene 2.9, following the change LUCENE-1542, we can
+      // no longer reliably read the parent "-1" (see comment in
+      // LuceneTaxonomyWriter.SinglePositionTokenStream). We have no way
+      // to fix this in indexing without breaking backward-compatibility
+      // with existing indexes, so what we'll do instead is just
+      // hard-code the parent of ordinal 0 to be -1, and assume (as is
+      // indeed the case) that no other parent can be -1.
+      parents[0] = TaxonomyReader.INVALID_ORDINAL;
+    }
+  }
+  
+  public ParallelTaxonomyArrays(IndexReader reader, ParallelTaxonomyArrays copyFrom) throws IOException {
+    assert copyFrom != null;
+
+    // note that copyParents.length may be equal to reader.maxDoc(). this is not a bug
+    // it may be caused if e.g. the taxonomy segments were merged, and so an updated
+    // NRT reader was obtained, even though nothing was changed. this is not very likely
+    // to happen.
+    int[] copyParents = copyFrom.parents();
+    this.parents = new int[reader.maxDoc()];
+    System.arraycopy(copyParents, 0, parents, 0, copyParents.length);
+    initParents(reader, copyParents.length);
+
+    if (copyFrom.initializedChildren) {
+      initChildrenSiblings(copyFrom);
+    }
+  }
+
+  private final synchronized void initChildrenSiblings(ParallelTaxonomyArrays copyFrom) {
+    if (!initializedChildren) { // must do this check !
+      children = new int[parents.length];
+      siblings = new int[parents.length];
+      if (copyFrom != null) {
+        // called from the ctor, after we know copyFrom has initialized children/siblings
+        System.arraycopy(copyFrom.children(), 0, children, 0, copyFrom.children().length);
+        System.arraycopy(copyFrom.siblings(), 0, siblings, 0, copyFrom.siblings().length);
+      }
+      computeChildrenSiblings(parents, 0);
+      initializedChildren = true;
+    }
+  }
+  
+  private void computeChildrenSiblings(int[] parents, int first) {
+    // reset the youngest child of all ordinals. while this should be done only
+    // for the leaves, we don't know up front which are the leaves, so we reset
+    // all of them.
+    for (int i = first; i < parents.length; i++) {
+      children[i] = TaxonomyReader.INVALID_ORDINAL;
+    }
+    
+    // the root category has no parent, and therefore no siblings
+    if (first == 0) {
+      first = 1;
+      siblings[0] = TaxonomyReader.INVALID_ORDINAL;
+    }
+    
+    for (int i = first; i < parents.length; i++) {
+      // note that parents[i] is always < i, so the right-hand-side of
+      // the following line is already set when we get here
+      siblings[i] = children[parents[i]];
+      children[parents[i]] = i;
+    }
+  }
+  
+  // Read the parents of the new categories
+  private void initParents(IndexReader reader, int first) throws IOException {
+    if (reader.maxDoc() == first) {
+      return;
+    }
+    
+    // it's ok to use MultiFields because we only iterate on one posting list.
+    // breaking it to loop over the leaves() only complicates code for no
+    // apparent gain.
+    DocsEnum positions = MultiFields.getTermPositionsEnum(reader, null,
+        Consts.FIELD_PAYLOADS, Consts.PAYLOAD_PARENT_BYTES_REF,
+        DocsEnum.FLAG_PAYLOADS);
+
+    // shouldn't really happen, if it does, something's wrong
+    if (positions == null || positions.advance(first) == DocIdSetIterator.NO_MORE_DOCS) {
+      throw new CorruptIndexException("Missing parent data for category " + first);
+    }
+    
+    int num = reader.maxDoc();
+    for (int i = first; i < num; i++) {
+      if (positions.docID() == i) {
+        if (positions.freq() == 0) { // shouldn't happen
+          throw new CorruptIndexException("Missing parent data for category " + i);
+        }
+        
+        parents[i] = positions.nextPosition();
+        
+        if (positions.nextDoc() == DocIdSetIterator.NO_MORE_DOCS) {
+          if (i + 1 < num) {
+            throw new CorruptIndexException("Missing parent data for category "+ (i + 1));
+          }
+          break;
+        }
+      } else { // this shouldn't happen
+        throw new CorruptIndexException("Missing parent data for category " + i);
+      }
+    }
+  }
+  
+  /**
+   * Adds the given ordinal/parent info and returns either a new instance if the
+   * underlying array had to grow, or this instance otherwise.
+   * <p>
+   * <b>NOTE:</b> you should call this method from a thread-safe code.
+   */
+  ParallelTaxonomyArrays add(int ordinal, int parentOrdinal) {
+    if (ordinal >= parents.length) {
+      int[] newarray = ArrayUtil.grow(parents, ordinal + 1);
+      newarray[ordinal] = parentOrdinal;
+      return new ParallelTaxonomyArrays(newarray);
+    }
+    parents[ordinal] = parentOrdinal;
+    return this;
+  }
+  
+  /**
+   * Returns the parents array, where {@code parents[i]} denotes the parent of
+   * category ordinal {@code i}.
+   */
+  public int[] parents() {
+    return parents;
+  }
+  
+  /**
+   * Returns the children array, where {@code children[i]} denotes the youngest
+   * child of category ordinal {@code i}. The youngest child is defined as the
+   * category that was added last to the taxonomy as an immediate child of
+   * {@code i}.
+   */
+  public int[] children() {
+    if (!initializedChildren) {
+      initChildrenSiblings(null);
+    }
+    
+    // the array is guaranteed to be populated
+    return children;
+  }
+  
+  /**
+   * Returns the siblings array, where {@code siblings[i]} denotes the sibling
+   * of category ordinal {@code i}. The sibling is defined as the previous
+   * youngest child of {@code parents[i]}.
+   */
+  public int[] siblings() {
+    if (!initializedChildren) {
+      initChildrenSiblings(null);
+    }
+    
+    // the array is guaranteed to be populated
+    return siblings;
+  }
+
+}
diff --git a/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/directory/TaxonomyIndexArrays.java b/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/directory/TaxonomyIndexArrays.java
index a06bca3..d1a8b77 100644
--- a/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/directory/TaxonomyIndexArrays.java
+++ b/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/directory/TaxonomyIndexArrays.java
@@ -1,16 +1,16 @@
 package org.apache.lucene.facet.taxonomy.directory;
 
-import java.io.IOException;
-
 import org.apache.lucene.facet.taxonomy.ParallelTaxonomyArrays;
 import org.apache.lucene.facet.taxonomy.TaxonomyReader;
 import org.apache.lucene.index.CorruptIndexException;
-import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.index.DocsEnum;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.MultiFields;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.util.ArrayUtil;
 
+import java.io.IOException;
+
 /*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
@@ -129,9 +129,9 @@
     // it's ok to use MultiFields because we only iterate on one posting list.
     // breaking it to loop over the leaves() only complicates code for no
     // apparent gain.
-    DocsAndPositionsEnum positions = MultiFields.getTermPositionsEnum(reader, null,
+    DocsEnum positions = MultiFields.getTermPositionsEnum(reader, null,
         Consts.FIELD_PAYLOADS, Consts.PAYLOAD_PARENT_BYTES_REF,
-        DocsAndPositionsEnum.FLAG_PAYLOADS);
+        DocsEnum.FLAG_PAYLOADS);
 
     // shouldn't really happen, if it does, something's wrong
     if (positions == null || positions.advance(first) == DocIdSetIterator.NO_MORE_DOCS) {
diff --git a/lucene/facet/src/java/org/apache/lucene/facet/util/FacetsPayloadMigrationReader.java b/lucene/facet/src/java/org/apache/lucene/facet/util/FacetsPayloadMigrationReader.java
index e3d998d..8958577 100644
--- a/lucene/facet/src/java/org/apache/lucene/facet/util/FacetsPayloadMigrationReader.java
+++ b/lucene/facet/src/java/org/apache/lucene/facet/util/FacetsPayloadMigrationReader.java
@@ -17,21 +17,15 @@
  * limitations under the License.
  */
 
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-
 import org.apache.lucene.facet.params.CategoryListParams;
 import org.apache.lucene.facet.params.FacetIndexingParams;
 import org.apache.lucene.index.AtomicReader;
 import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.BinaryDocValues;
 import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.index.DocsAndPositionsEnum;
-import org.apache.lucene.index.FieldInfo.DocValuesType;
+import org.apache.lucene.index.DocsEnum;
 import org.apache.lucene.index.FieldInfo;
+import org.apache.lucene.index.FieldInfo.DocValuesType;
 import org.apache.lucene.index.FieldInfos;
 import org.apache.lucene.index.Fields;
 import org.apache.lucene.index.FilterAtomicReader;
@@ -43,6 +37,12 @@
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
 
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+
 /**
  * A {@link FilterAtomicReader} for migrating a facets index which encodes
  * category ordinals in a payload to {@link BinaryDocValues}. To migrate the index,
@@ -80,20 +80,20 @@
 
     private Fields fields;
     private Term term;
-    private DocsAndPositionsEnum dpe;
+    private DocsEnum dpe;
     private int curDocID = -1;
     private int lastRequestedDocID;
 
-    private DocsAndPositionsEnum getDPE() {
+    private DocsEnum getDPE() {
       try {
-        DocsAndPositionsEnum dpe = null;
+        DocsEnum dpe = null;
         if (fields != null) {
           Terms terms = fields.terms(term.field());
           if (terms != null) {
             TermsEnum te = terms.iterator(null); // no use for reusing
             if (te.seekExact(term.bytes())) {
               // we're not expected to be called for deleted documents
-              dpe = te.docsAndPositions(null, null, DocsAndPositionsEnum.FLAG_PAYLOADS);
+              dpe = te.docsAndPositions(null, null, DocsEnum.FLAG_PAYLOADS);
             }
           }
         }
diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/BlockGroupingCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/BlockGroupingCollector.java
index a39678d..6760f77 100644
--- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/BlockGroupingCollector.java
+++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/BlockGroupingCollector.java
@@ -20,7 +20,19 @@
 
 import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.search.*;
+import org.apache.lucene.search.Collector;
+import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.search.FieldComparator;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.Sort;
+import org.apache.lucene.search.SortField;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.search.TopDocsCollector;
+import org.apache.lucene.search.TopFieldCollector;
+import org.apache.lucene.search.TopScoreDocCollector;
+import org.apache.lucene.search.Weight;
+import org.apache.lucene.search.intervals.IntervalIterator;
 import org.apache.lucene.util.ArrayUtil;
 import org.apache.lucene.util.PriorityQueue;
 
@@ -119,6 +131,11 @@
     }
 
     @Override
+    public IntervalIterator intervals(boolean collectIntervals) throws IOException {
+      throw new UnsupportedOperationException();
+    }
+
+    @Override
     public long cost() {
       return 1;
     }
diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TokenSources.java b/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TokenSources.java
index e0b3c8c..5f7d73a 100644
--- a/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TokenSources.java
+++ b/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TokenSources.java
@@ -26,7 +26,7 @@
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
-import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.index.DocsEnum;
 import org.apache.lucene.index.Fields;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.StoredDocument;
@@ -202,7 +202,7 @@
     ArrayList<Token> unsortedTokens = null;
     termsEnum = tpv.iterator(null);
     BytesRef text;
-    DocsAndPositionsEnum dpEnum = null;
+    DocsEnum dpEnum = null;
     while ((text = termsEnum.next()) != null) {
 
       dpEnum = termsEnum.docsAndPositions(null, dpEnum);
diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TokenStreamFromTermPositionVector.java b/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TokenStreamFromTermPositionVector.java
index 4057bd9..2bc7dc1 100644
--- a/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TokenStreamFromTermPositionVector.java
+++ b/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TokenStreamFromTermPositionVector.java
@@ -27,7 +27,7 @@
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
-import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.index.DocsEnum;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.util.BytesRef;
@@ -62,7 +62,7 @@
     final boolean hasOffsets = vector.hasOffsets();
     final TermsEnum termsEnum = vector.iterator(null);
     BytesRef text;
-    DocsAndPositionsEnum dpEnum = null;
+    DocsEnum dpEnum = null;
     while((text = termsEnum.next()) != null) {
       dpEnum = termsEnum.docsAndPositions(null, dpEnum);
       assert dpEnum != null; // presumably checked by TokenSources.hasPositions earlier
diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/highlight/positions/ArrayIntervalIterator.java b/lucene/highlighter/src/java/org/apache/lucene/search/highlight/positions/ArrayIntervalIterator.java
new file mode 100644
index 0000000..180add4
--- /dev/null
+++ b/lucene/highlighter/src/java/org/apache/lucene/search/highlight/positions/ArrayIntervalIterator.java
@@ -0,0 +1,69 @@
+package org.apache.lucene.search.highlight.positions;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.search.posfilter.Interval;
+import org.apache.lucene.search.intervals.IntervalCollector;
+import org.apache.lucene.search.intervals.IntervalIterator;
+
+/**
+ * Present an array of PositionIntervals as an Iterator.
+ * @lucene.experimental
+ */
+public class ArrayIntervalIterator extends IntervalIterator {
+
+  private int next = 0;
+  private int count;
+  private Interval[] positions;
+  
+  public ArrayIntervalIterator (Interval[] positions, int count) {
+    super(null, false);
+    this.positions = positions;
+    this.count = count;
+  }
+  
+  @Override
+  public Interval next() {
+    if (next >= count)
+      return null;
+    return positions[next++];
+  }
+
+  @Override
+  public IntervalIterator[] subs(boolean inOrder) {
+    return EMPTY;
+  }
+
+  @Override
+  public void collect(IntervalCollector collector) {
+    assert collectIntervals;
+  }
+
+  @Override
+  public int scorerAdvanced(int docId) throws IOException {
+    return 0;
+  }
+
+  @Override
+  public int matchDistance() {
+    return 0;
+  }
+  
+}
\ No newline at end of file
diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/highlight/positions/DocAndPositions.java b/lucene/highlighter/src/java/org/apache/lucene/search/highlight/positions/DocAndPositions.java
new file mode 100644
index 0000000..f8d8fd8
--- /dev/null
+++ b/lucene/highlighter/src/java/org/apache/lucene/search/highlight/positions/DocAndPositions.java
@@ -0,0 +1,66 @@
+package org.apache.lucene.search.highlight.positions;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Comparator;
+
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.posfilter.Interval;
+import org.apache.lucene.util.ArrayUtil;
+
+/** Used to accumulate position intervals while scoring 
+ * @lucene.experimental
+ */
+public final class DocAndPositions extends ScoreDoc {
+  
+  public int posCount = 0;
+  public Interval[] positions;
+  
+  public DocAndPositions(int doc) {
+    super(doc, 0);
+    positions = new Interval[32];
+  }
+  
+  public void storePosition (Interval pos) {
+    ensureStorage();
+    positions[posCount++] = (Interval) pos.clone();
+  }
+  
+  private void ensureStorage () {
+    if (posCount >= positions.length) {
+      Interval temp[] = new Interval[positions.length * 2];
+      System.arraycopy(positions, 0, temp, 0, positions.length);
+      positions = temp;
+    }
+  }
+  
+  public Interval[] sortedPositions() {
+    ArrayUtil.mergeSort(positions, 0, posCount, new Comparator<Interval>() {
+      public int compare(Interval o1, Interval o2) {
+        return 
+          o1.begin < o2.begin ? -1 : 
+            (o1.begin > o2.begin ? 1 :
+              (o1.end < o2.end ? -1 : 
+                (o1.end > o2.end ? 1 : 
+                  0)));
+      }
+      
+    });
+    return positions;
+  }
+}
diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/highlight/positions/HighlightingIntervalCollector.java b/lucene/highlighter/src/java/org/apache/lucene/search/highlight/positions/HighlightingIntervalCollector.java
new file mode 100644
index 0000000..f64bbf1
--- /dev/null
+++ b/lucene/highlighter/src/java/org/apache/lucene/search/highlight/positions/HighlightingIntervalCollector.java
@@ -0,0 +1,109 @@
+package org.apache.lucene.search.highlight.positions;
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.Collector;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.Weight.PostingFeatures;
+import org.apache.lucene.search.posfilter.Interval;
+import org.apache.lucene.search.intervals.IntervalCollector;
+import org.apache.lucene.search.intervals.IntervalIterator;
+
+/**
+ * Collects the first maxDocs docs and their positions matching the query
+ * 
+ * @lucene.experimental
+ */
+
+public class HighlightingIntervalCollector extends Collector implements IntervalCollector {
+  
+  int count;
+  DocAndPositions docs[];
+  
+  public HighlightingIntervalCollector (int maxDocs) {
+    docs = new DocAndPositions[maxDocs];
+  }
+  
+  protected Scorer scorer;
+  private IntervalIterator positions;
+
+  @Override
+  public void collect(int doc) throws IOException {
+    if (count >= docs.length)
+      return;
+    addDoc (doc);
+    // consume any remaining positions the scorer didn't report
+    docs[count-1].score=scorer.score();
+    positions.scorerAdvanced(doc);
+    while(positions.next() != null) {
+      positions.collect(this);
+    }    
+  }
+  
+  private boolean addDoc (int doc) {
+    if (count <= 0 || docs[count-1].doc != doc) {
+      DocAndPositions spdoc = new DocAndPositions (doc);
+      docs[count++] = spdoc;
+      return true;
+    }
+    return false;
+  }
+  
+  public boolean acceptsDocsOutOfOrder() {
+    return false;
+  }
+
+  public void setScorer(Scorer scorer) throws IOException {
+    this.scorer = scorer;
+    positions = scorer.intervals(true);
+    // If we want to visit the other scorers, we can, here...
+  }
+  
+  public Scorer getScorer () {
+    return scorer;
+  }
+  
+  public DocAndPositions[] getDocs () {
+    DocAndPositions ret[] = new DocAndPositions[count];
+    System.arraycopy(docs, 0, ret, 0, count);
+    return ret;
+  }
+
+  public void setNextReader(AtomicReaderContext context) throws IOException {
+  }
+  
+  @Override
+  public PostingFeatures postingFeatures() {
+    return PostingFeatures.OFFSETS;
+  }
+
+  @Override
+  public void collectLeafPosition(Scorer scorer, Interval interval,
+      int docID) {
+    addDoc(docID);      
+    docs[count - 1].storePosition(interval);
+  }
+
+  @Override
+  public void collectComposite(Scorer scorer, Interval interval,
+      int docID) {
+  }
+
+}
diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/highlight/positions/IntervalTokenStream.java b/lucene/highlighter/src/java/org/apache/lucene/search/highlight/positions/IntervalTokenStream.java
new file mode 100644
index 0000000..8d7ce17
--- /dev/null
+++ b/lucene/highlighter/src/java/org/apache/lucene/search/highlight/positions/IntervalTokenStream.java
@@ -0,0 +1,74 @@
+package org.apache.lucene.search.highlight.positions;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
+import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
+import org.apache.lucene.search.posfilter.Interval;
+import org.apache.lucene.search.intervals.IntervalIterator;
+
+/**
+ * A TokenStream constructed from a stream of positions and their offsets.
+ * The document is segmented into tokens at the start and end offset of each interval.  The intervals
+ * are assumed to be non-overlapping.
+ * 
+ * TODO: abstract the dependency on the current PositionOffsetMapper impl; 
+ * allow for implementations of position->offset maps that don't rely on term vectors.
+ * 
+ * @lucene.experimental
+ */
+public class IntervalTokenStream extends TokenStream {
+
+  //this tokenizer generates four attributes:
+  // term, offset, positionIncrement? and type?
+  private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
+  private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
+  private final PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class);
+  //private final TypeAttribute typeAtt = addAttribute(TypeAttribute.class);
+  private final String text;
+  private final IntervalIterator positions;
+  
+  // the index of the current position interval
+  private Interval pos = null;
+  
+  public IntervalTokenStream (String text, IntervalIterator positions) {
+    this.text = text;
+    this.positions = positions;
+  }
+  
+  @Override
+  public final boolean incrementToken() throws IOException {
+    pos = positions.next();
+    if (pos == null){
+      return false;
+    }
+    int b, e; 
+    b = pos.offsetBegin;
+    e = pos.offsetEnd;
+    assert b >=0;
+    termAtt.append(text, b, e);
+    offsetAtt.setOffset(b, e);
+    posIncrAtt.setPositionIncrement(1);
+    return true;
+  }
+
+}
diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/postingshighlight/PostingsHighlighter.java b/lucene/highlighter/src/java/org/apache/lucene/search/postingshighlight/PostingsHighlighter.java
index d39e484..82e79c0 100644
--- a/lucene/highlighter/src/java/org/apache/lucene/search/postingshighlight/PostingsHighlighter.java
+++ b/lucene/highlighter/src/java/org/apache/lucene/search/postingshighlight/PostingsHighlighter.java
@@ -17,22 +17,9 @@
  * limitations under the License.
  */
 
-import java.io.IOException;
-import java.text.BreakIterator;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Comparator;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Locale;
-import java.util.Map;
-import java.util.PriorityQueue;
-import java.util.SortedSet;
-import java.util.TreeSet;
-
 import org.apache.lucene.index.AtomicReader;
 import org.apache.lucene.index.AtomicReaderContext;
-import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.index.DocsEnum;
 import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.FieldInfo.IndexOptions;
 import org.apache.lucene.index.IndexReader;
@@ -51,6 +38,19 @@
 import org.apache.lucene.util.InPlaceMergeSorter;
 import org.apache.lucene.util.UnicodeUtil;
 
+import java.io.IOException;
+import java.text.BreakIterator;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+import java.util.PriorityQueue;
+import java.util.SortedSet;
+import java.util.TreeSet;
+
 /**
  * Simple highlighter that does not analyze fields nor use
  * term vectors. Instead it requires 
@@ -437,7 +437,7 @@
     Map<Integer,Object> highlights = new HashMap<Integer,Object>();
     
     // reuse in the real sense... for docs in same segment we just advance our old enum
-    DocsAndPositionsEnum postings[] = null;
+    DocsEnum postings[] = null;
     TermsEnum termsEnum = null;
     int lastLeaf = -1;
 
@@ -462,7 +462,7 @@
       }
       if (leaf != lastLeaf) {
         termsEnum = t.iterator(null);
-        postings = new DocsAndPositionsEnum[terms.length];
+        postings = new DocsEnum[terms.length];
       }
       Passage passages[] = highlightDoc(field, terms, content.length(), bi, doc - subContext.docBase, termsEnum, postings, maxPassages);
       if (passages.length == 0) {
@@ -483,7 +483,7 @@
   // we can intersect these with the postings lists via BreakIterator.preceding(offset),s
   // score each sentence as norm(sentenceStartOffset) * sum(weight * tf(freq))
   private Passage[] highlightDoc(String field, BytesRef terms[], int contentLength, BreakIterator bi, int doc, 
-      TermsEnum termsEnum, DocsAndPositionsEnum[] postings, int n) throws IOException {
+      TermsEnum termsEnum, DocsEnum[] postings, int n) throws IOException {
     PassageScorer scorer = getScorer(field);
     if (scorer == null) {
       throw new NullPointerException("PassageScorer cannot be null");
@@ -492,7 +492,7 @@
     float weights[] = new float[terms.length];
     // initialize postings
     for (int i = 0; i < terms.length; i++) {
-      DocsAndPositionsEnum de = postings[i];
+      DocsEnum de = postings[i];
       int pDoc;
       if (de == EMPTY) {
         continue;
@@ -501,7 +501,7 @@
         if (!termsEnum.seekExact(terms[i])) {
           continue; // term not found
         }
-        de = postings[i] = termsEnum.docsAndPositions(null, null, DocsAndPositionsEnum.FLAG_OFFSETS);
+        de = postings[i] = termsEnum.docsAndPositions(null, null, DocsEnum.FLAG_OFFSETS);
         if (de == null) {
           // no positions available
           throw new IllegalArgumentException("field '" + field + "' was indexed without offsets, cannot highlight");
@@ -539,7 +539,7 @@
     
     OffsetsEnum off;
     while ((off = pq.poll()) != null) {
-      final DocsAndPositionsEnum dp = off.dp;
+      final DocsEnum dp = off.dp;
       int start = dp.startOffset();
       if (start == -1) {
         throw new IllegalArgumentException("field '" + field + "' was indexed without offsets, cannot highlight");
@@ -641,11 +641,11 @@
   }
   
   private static class OffsetsEnum implements Comparable<OffsetsEnum> {
-    DocsAndPositionsEnum dp;
+    DocsEnum dp;
     int pos;
     int id;
     
-    OffsetsEnum(DocsAndPositionsEnum dp, int id) throws IOException {
+    OffsetsEnum(DocsEnum dp, int id) throws IOException {
       this.dp = dp;
       this.id = id;
       this.pos = 1;
@@ -667,10 +667,10 @@
     }
   }
   
-  private static final DocsAndPositionsEnum EMPTY = new DocsAndPositionsEnum() {
+  private static final DocsEnum EMPTY = new DocsEnum() {
 
     @Override
-    public int nextPosition() throws IOException { return 0; }
+    public int nextPosition() throws IOException { return NO_MORE_POSITIONS; }
 
     @Override
     public int startOffset() throws IOException { return Integer.MAX_VALUE; }
diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/FieldTermStack.java b/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/FieldTermStack.java
index 7c4534e..8cafdba 100644
--- a/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/FieldTermStack.java
+++ b/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/FieldTermStack.java
@@ -21,7 +21,7 @@
 import java.util.LinkedList;
 import java.util.Set;
 
-import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.index.DocsEnum;
 import org.apache.lucene.index.Fields;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.Term;
@@ -93,7 +93,7 @@
 
     final CharsRef spare = new CharsRef();
     final TermsEnum termsEnum = vector.iterator(null);
-    DocsAndPositionsEnum dpEnum = null;
+    DocsEnum dpEnum = null;
     BytesRef text;
     
     int numDocs = reader.maxDoc();
diff --git a/lucene/highlighter/src/test/org/apache/lucene/search/highlight/custom/HighlightCustomQueryTest.java b/lucene/highlighter/src/test/org/apache/lucene/search/highlight/custom/HighlightCustomQueryTest.java
index 536259a..a52d9b7 100644
--- a/lucene/highlighter/src/test/org/apache/lucene/search/highlight/custom/HighlightCustomQueryTest.java
+++ b/lucene/highlighter/src/test/org/apache/lucene/search/highlight/custom/HighlightCustomQueryTest.java
@@ -16,9 +16,6 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-import java.io.IOException;
-import java.util.Map;
-
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.analysis.MockTokenFilter;
 import org.apache.lucene.analysis.MockTokenizer;
@@ -36,6 +33,9 @@
 import org.apache.lucene.search.highlight.WeightedSpanTermExtractor;
 import org.apache.lucene.util.LuceneTestCase;
 
+import java.io.IOException;
+import java.util.Map;
+
 /**
  * Tests the extensibility of {@link WeightedSpanTermExtractor} and
  * {@link QueryScorer} in a user defined package
diff --git a/lucene/highlighter/src/test/org/apache/lucene/search/highlight/positions/IntervalHighlighterTest.java b/lucene/highlighter/src/test/org/apache/lucene/search/highlight/positions/IntervalHighlighterTest.java
new file mode 100644
index 0000000..5dad876
--- /dev/null
+++ b/lucene/highlighter/src/test/org/apache/lucene/search/highlight/positions/IntervalHighlighterTest.java
@@ -0,0 +1,528 @@
+package org.apache.lucene.search.highlight.positions;
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.MockTokenFilter;
+import org.apache.lucene.analysis.MockTokenizer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.FieldInfo.IndexOptions;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.BooleanClause;
+import org.apache.lucene.search.BooleanClause.Occur;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.ConstantScoreQuery;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.MultiPhraseQuery;
+import org.apache.lucene.search.MultiTermQuery;
+import org.apache.lucene.search.PhraseQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.WildcardQuery;
+import org.apache.lucene.search.highlight.Highlighter;
+import org.apache.lucene.search.highlight.InvalidTokenOffsetsException;
+import org.apache.lucene.search.highlight.SimpleFragmenter;
+import org.apache.lucene.search.highlight.TextFragment;
+import org.apache.lucene.search.intervals.BlockIntervalIterator;
+import org.apache.lucene.search.intervals.IntervalFilter;
+import org.apache.lucene.search.intervals.IntervalFilterQuery;
+import org.apache.lucene.search.intervals.IntervalIterator;
+import org.apache.lucene.search.posfilter.NonOverlappingQuery;
+import org.apache.lucene.search.posfilter.OrderedNearQuery;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
+import org.junit.Ignore;
+
+import java.io.IOException;
+import java.io.StringReader;
+
+/**
+ * TODO: FIX THIS TEST Phrase and Span Queries positions callback API
+ */
+@SuppressCodecs({"MockFixedIntBlock", "MockVariableIntBlock", "MockSep", "MockRandom"})
+public class IntervalHighlighterTest extends LuceneTestCase {
+  
+  protected final static String F = "f";
+  protected Analyzer analyzer;
+  protected Directory dir;
+  protected IndexSearcher searcher;
+  private IndexWriterConfig iwc;
+  
+  private static final String PORRIDGE_VERSE = "Pease porridge hot! Pease porridge cold! Pease porridge in the pot nine days old! Some like it hot, some"
+      + " like it cold, Some like it in the pot nine days old! Pease porridge hot! Pease porridge cold!";
+  
+  public void setUp() throws Exception {
+    super.setUp();
+    iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)).setOpenMode(OpenMode.CREATE);
+    analyzer = iwc.getAnalyzer();
+    dir = newDirectory();
+  }
+  
+  public void close() throws IOException {
+    if (searcher != null) {
+      searcher.getIndexReader().close();
+      searcher = null;
+    }
+    dir.close();
+  }
+  
+  // make several docs
+  protected void insertDocs(Analyzer analyzer, String... values)
+      throws Exception {
+    IndexWriter writer = new IndexWriter(dir, iwc);
+    FieldType type = new FieldType();
+    type.setIndexed(true);
+    type.setTokenized(true);
+    type.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+    type.setStored(true);
+    for (String value : values) {
+      Document doc = new Document();
+      Field f = newField(F, value, type);
+      doc.add(f);
+      writer.addDocument(doc);
+    }
+    writer.close();
+    if (searcher != null) {
+      searcher.getIndexReader().close();
+    }
+    searcher = new IndexSearcher(DirectoryReader.open(dir));
+  }
+
+  protected static TermQuery termQuery(String term) {
+    return new TermQuery(new Term(F, term));
+  }
+  
+  private String[] doSearch(Query q) throws IOException,
+      InvalidTokenOffsetsException {
+    return doSearch(q, 100);
+  }
+  
+  private class ConstantScorer implements
+      org.apache.lucene.search.highlight.Scorer {
+    
+    @Override
+    public TokenStream init(TokenStream tokenStream) throws IOException {
+      return tokenStream;
+    }
+    
+    @Override
+    public void startFragment(TextFragment newFragment) {}
+    
+    @Override
+    public float getTokenScore() {
+      return 1;
+    }
+    
+    @Override
+    public float getFragmentScore() {
+      return 1;
+    }
+  }
+
+  private String getHighlight(Query q) throws IOException, InvalidTokenOffsetsException {
+    return doSearch(q, Integer.MAX_VALUE)[0];
+  }
+  
+  private String[] doSearch(Query q, int maxFragSize) throws IOException,
+      InvalidTokenOffsetsException {
+    return doSearch(q, maxFragSize, 0);
+  }
+  private String[] doSearch(Query q, int maxFragSize, int docIndex) throws IOException, InvalidTokenOffsetsException {
+    return doSearch(q, maxFragSize, docIndex, false);
+  }
+  private String[] doSearch(Query q, int maxFragSize, int docIndex, boolean analyze)
+      throws IOException, InvalidTokenOffsetsException {
+    // ConstantScorer is a fragment Scorer, not a search result (document)
+    // Scorer
+    Highlighter highlighter = new Highlighter(new ConstantScorer());
+    highlighter.setTextFragmenter(new SimpleFragmenter(maxFragSize));
+    HighlightingIntervalCollector collector = new HighlightingIntervalCollector(10);
+    if (q instanceof MultiTermQuery) {
+      ((MultiTermQuery) q)
+          .setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE);
+    }
+    searcher.search(q, collector);
+    DocAndPositions doc = collector.docs[docIndex];
+    if (doc == null) return null;
+    String text = searcher.getIndexReader().document(doc.doc).get(F);
+    // FIXME: test error cases: for non-stored fields, and fields w/no term
+    // vectors
+    // searcher.getIndexReader().getTermFreqVector(doc.doc, F, pom);
+    final TokenStream stream;
+    if (analyze) {
+      stream = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true,
+          MockTokenFilter.EMPTY_STOPSET).tokenStream(F,
+          new StringReader(text));
+    } else {
+      stream = new IntervalTokenStream(text, new ArrayIntervalIterator(
+          doc.sortedPositions(), doc.posCount));
+    }
+    //
+    TextFragment[] fragTexts = highlighter.getBestTextFragments(
+         stream , text, false, 10);
+    String[] frags = new String[fragTexts.length];
+    for (int i = 0; i < frags.length; i++)
+      frags[i] = fragTexts[i].toString();
+    return frags;
+  }
+  
+  public void testTerm() throws Exception {
+    insertDocs(analyzer, "This is a test test");
+    String frags[] = doSearch(termQuery("test"));
+    assertEquals("This is a <B>test</B> <B>test</B>", frags[0]);
+    close();
+  }
+  
+  public void testSeveralSnippets() throws Exception {
+    String input = "this is some long text.  It has the word long in many places.  In fact, it has long on some different fragments.  "
+        + "Let us see what happens to long in this case.";
+    String gold = "this is some <B>long</B> text.  It has the word <B>long</B> in many places.  In fact, it has <B>long</B> on some different fragments.  "
+        + "Let us see what happens to <B>long</B> in this case.";
+    insertDocs(analyzer, input);
+    String frags[] = doSearch(termQuery("long"), input.length());
+    assertEquals(gold, frags[0]);
+    close();
+  }
+  
+  public void testBooleanAnd() throws Exception {
+    insertDocs(analyzer, "This is a test");
+    BooleanQuery bq = new BooleanQuery();
+    bq.add(new BooleanClause(termQuery("This"), Occur.MUST));
+    bq.add(new BooleanClause(termQuery("test"), Occur.MUST));
+    String frags[] = doSearch(bq);
+    assertEquals("<B>This</B> is a <B>test</B>", frags[0]);
+    close();
+  }
+  
+  public void testConstantScore() throws Exception {
+    insertDocs(analyzer, "This is a test");
+    BooleanQuery bq = new BooleanQuery();
+    bq.add(new BooleanClause(termQuery("This"), Occur.MUST));
+    bq.add(new BooleanClause(termQuery("test"), Occur.MUST));
+    String frags[] = doSearch(new ConstantScoreQuery(bq));
+    assertEquals("<B>This</B> is a <B>test</B>", frags[0]);
+    close();
+  }
+  
+  public void testBooleanAndOtherOrder() throws Exception {
+    insertDocs(analyzer, "This is a test");
+    BooleanQuery bq = new BooleanQuery();
+    bq.add(new BooleanClause(new TermQuery(new Term(F, "test")), Occur.MUST));
+    bq.add(new BooleanClause(new TermQuery(new Term(F, "This")), Occur.MUST));
+    String frags[] = doSearch(bq);
+    assertEquals("<B>This</B> is a <B>test</B>", frags[0]);
+    close();
+  }
+  
+  public void testBooleanOr() throws Exception {
+    insertDocs(analyzer, "This is a test");
+    BooleanQuery bq = new BooleanQuery();
+    bq.add(new BooleanClause(new TermQuery(new Term(F, "test")), Occur.SHOULD));
+    bq.add(new BooleanClause(new TermQuery(new Term(F, "This")), Occur.SHOULD));
+    String frags[] = doSearch(bq);
+    assertEquals("<B>This</B> is a <B>test</B>", frags[0]);
+    close();
+  }
+  
+  public void testSingleMatchScorer() throws Exception {
+    insertDocs(analyzer, "This is a test");
+    BooleanQuery bq = new BooleanQuery();
+    bq.add(new BooleanClause(new TermQuery(new Term(F, "test")), Occur.SHOULD));
+    bq.add(new BooleanClause(new TermQuery(new Term(F, "notoccurringterm")),
+        Occur.SHOULD));
+    String frags[] = doSearch(bq);
+    assertEquals("This is a <B>test</B>", frags[0]);
+    close();
+  }
+  
+  public void testBooleanNrShouldMatch() throws Exception {
+    insertDocs(analyzer, "a b c d e f g h i");
+    BooleanQuery bq = new BooleanQuery();
+    bq.add(new BooleanClause(new TermQuery(new Term(F, "a")), Occur.SHOULD));
+    bq.add(new BooleanClause(new TermQuery(new Term(F, "b")), Occur.SHOULD));
+    bq.add(new BooleanClause(new TermQuery(new Term(F, "no")), Occur.SHOULD));
+    
+    // This generates a ConjunctionSumScorer
+    bq.setMinimumNumberShouldMatch(2);
+    String frags[] = doSearch(bq);
+    assertEquals("<B>a</B> <B>b</B> c d e f g h i", frags[0]);
+    
+    // This generates no scorer
+    bq.setMinimumNumberShouldMatch(3);
+    frags = doSearch(bq);
+    assertNull(frags);
+    
+    // This generates a DisjunctionSumScorer
+    bq.setMinimumNumberShouldMatch(2);
+    bq.add(new BooleanClause(new TermQuery(new Term(F, "c")), Occur.SHOULD));
+    frags = doSearch(bq);
+    assertEquals("<B>a</B> <B>b</B> <B>c</B> d e f g h i", frags[0]);
+    close();
+  }
+  
+  public void testPhrase() throws Exception {
+    insertDocs(analyzer, "is it that this is a test, is it");
+    BooleanQuery bq = new BooleanQuery();
+    bq.add(new BooleanClause(new TermQuery(new Term(F, "is")), Occur.MUST));
+    bq.add(new BooleanClause(new TermQuery(new Term(F, "a")), Occur.MUST));
+    IntervalFilterQuery pfq = new IntervalFilterQuery(bq,
+        new BlockPositionIteratorFilter());
+    String frags[] = doSearch(pfq);
+    // make sure we highlight the phrase, and not the terms outside the phrase
+    assertEquals("is it that this <B>is</B> <B>a</B> test, is it", frags[0]);
+    close();
+  }
+  
+  /*
+   * Failing ... PhraseQuery scorer needs positions()?
+   */
+  //@Ignore
+  public void testPhraseOriginal() throws Exception {
+    insertDocs(analyzer, "This is a test");
+    PhraseQuery pq = new PhraseQuery();
+    pq.add(new Term(F, "a"));
+    pq.add(new Term(F, "test"));
+    String frags[] = doSearch(pq);
+    assertEquals("This is <B>a</B> <B>test</B>", frags[0]);
+    close();
+  }
+  
+  public void testNestedBoolean() throws Exception {
+    insertDocs(analyzer, "This is a test");
+    BooleanQuery bq = new BooleanQuery();
+    bq.add(new BooleanClause(new TermQuery(new Term(F, "test")), Occur.SHOULD));
+    BooleanQuery bq2 = new BooleanQuery();
+    bq2.add(new BooleanClause(new TermQuery(new Term(F, "This")), Occur.SHOULD));
+    bq2.add(new BooleanClause(new TermQuery(new Term(F, "is")), Occur.SHOULD));
+    bq.add(new BooleanClause(bq2, Occur.SHOULD));
+    String frags[] = doSearch(bq);
+    assertEquals("<B>This</B> <B>is</B> a <B>test</B>", frags[0]);
+    close();
+  }
+  
+  public void testWildcard() throws Exception {
+    insertDocs(analyzer, "This is a test");
+    String frags[] = doSearch(new WildcardQuery(new Term(F, "t*t")));
+    assertEquals("This is a <B>test</B>", frags[0]);
+    close();
+  }
+
+  public void testMixedBooleanNot() throws Exception {
+    insertDocs(analyzer, "this is a test", "that is an elephant");
+    BooleanQuery bq = new BooleanQuery();
+    bq.add(new BooleanClause(new TermQuery(new Term(F, "test")), Occur.MUST));
+    bq.add(new BooleanClause(new TermQuery(new Term(F, "that")), Occur.MUST_NOT));
+    String frags[] = doSearch(bq);
+    assertEquals("this is a <B>test</B>", frags[0]);
+    close();
+  }
+
+  public void testMixedBooleanShould() throws Exception {
+    insertDocs(analyzer, "this is a test", "that is an elephant", "the other was a rhinoceros");
+    BooleanQuery bq = new BooleanQuery();
+    bq.add(new BooleanClause(new TermQuery(new Term(F, "is")), Occur.MUST));
+    bq.add(new BooleanClause(new TermQuery(new Term(F, "test")), Occur.SHOULD));
+    String frags[] = doSearch(bq, 50, 0);
+    assertEquals("this <B>is</B> a <B>test</B>", frags[0]);
+    frags = doSearch(bq, 50, 1);
+    assertEquals("that <B>is</B> an elephant", frags[0]);
+
+    bq.add(new BooleanClause(new TermQuery(new Term(F, "rhinoceros")), Occur.SHOULD));
+    frags = doSearch(bq, 50, 0);
+    assertEquals("this <B>is</B> a <B>test</B>", frags[0]);
+    frags = doSearch(bq, 50, 1);
+    assertEquals("that <B>is</B> an elephant", frags[0]);
+    close();
+  }
+  
+  public void testMultipleDocumentsAnd() throws Exception {
+    insertDocs(analyzer, "This document has no matches", PORRIDGE_VERSE,
+        "This document has some Pease porridge in it");
+    BooleanQuery bq = new BooleanQuery();
+    bq.add(new BooleanClause(new TermQuery(new Term(F, "Pease")), Occur.MUST));
+    bq.add(new BooleanClause(new TermQuery(new Term(F, "porridge")), Occur.MUST));
+    String frags[] = doSearch(bq, 50, 0);
+    assertEquals(
+        "<B>Pease</B> <B>porridge</B> hot! <B>Pease</B> <B>porridge</B> cold! <B>Pease</B>",
+        frags[0]);
+    frags = doSearch(bq, 50, 1);
+    assertEquals("This document has some <B>Pease</B> <B>porridge</B> in it",
+        frags[0]);
+    close();
+  }
+  
+
+  public void testMultipleDocumentsOr() throws Exception {
+    insertDocs(analyzer, "This document has no matches", PORRIDGE_VERSE,
+        "This document has some Pease porridge in it");
+    BooleanQuery bq = new BooleanQuery();
+    bq.add(new BooleanClause(new TermQuery(new Term(F, "Pease")), Occur.SHOULD));
+    bq.add(new BooleanClause(new TermQuery(new Term(F, "porridge")),
+        Occur.SHOULD));
+    String frags[] = doSearch(bq, 50, 0);
+    assertEquals(
+        "<B>Pease</B> <B>porridge</B> hot! <B>Pease</B> <B>porridge</B> cold! <B>Pease</B>",
+        frags[0]);
+    frags = doSearch(bq, 50, 1);
+    assertEquals("This document has some <B>Pease</B> <B>porridge</B> in it",
+        frags[0]);
+    close();
+  }
+  
+  public void testBrouwerianQuery() throws Exception {
+
+    insertDocs(analyzer, "the quick brown duck jumps over the lazy dog with the quick brown fox");
+
+    BooleanQuery query = new BooleanQuery();
+    query.add(new BooleanClause(new TermQuery(new Term(F, "the")), Occur.MUST));
+    query.add(new BooleanClause(new TermQuery(new Term(F, "quick")), Occur.MUST));
+    query.add(new BooleanClause(new TermQuery(new Term(F, "jumps")), Occur.MUST));
+
+    assertEquals(getHighlight(query),
+                 "<B>the</B> <B>quick</B> brown duck <B>jumps</B> over <B>the</B> lazy dog with the <B>quick</B> brown fox");
+
+    BooleanQuery sub = new BooleanQuery();
+    sub.add(new BooleanClause(new TermQuery(new Term(F, "duck")), Occur.MUST));
+    NonOverlappingQuery bq = new NonOverlappingQuery(query, sub);
+
+    assertEquals(getHighlight(bq),
+                 "the quick brown duck <B>jumps</B> over <B>the</B> lazy dog with the <B>quick</B> brown fox");
+
+    close();
+  }
+  
+  @Ignore("not implemented yet - unsupported")
+  public void testMultiPhraseQuery() throws Exception {
+    MultiPhraseQuery query = new MultiPhraseQuery();
+    insertDocs(analyzer, "pease porridge hot but not too hot or otherwise pease porridge cold");
+
+    query.add(terms(F, "pease"), 0);
+    query.add(terms(F, "porridge"), 1);
+    query.add(terms(F, "hot", "cold"), 2);
+    query.setSlop(1);
+    
+    String[] frags = doSearch(query, Integer.MAX_VALUE);
+    assertEquals("<B>pease</B> <B>porridge</B> <B>hot</B> but not too hot or otherwise <B>pease</B> <B>porridge</B> <B>cold</B>", frags[0]);
+
+    close();
+  }
+  
+  @Ignore("not implemented yet - unsupported")
+  public void testMultiPhraseQueryCollisions() throws Exception {
+    MultiPhraseQuery query = new MultiPhraseQuery();
+    insertDocs(analyzer, "pease porridge hot not too hot or otherwise pease porridge porridge");
+
+    query.add(terms(F, "pease"), 0);
+    query.add(terms(F, "porridge"), 1);
+    query.add(terms(F, "coldasice", "porridge" ), 2);
+    query.setSlop(1);
+    
+    String[] frags = doSearch(query, Integer.MAX_VALUE);
+    assertEquals("pease porridge hot but not too hot or otherwise <B>pease</B> <B>porridge</B> <B>porridge</B>", frags[0]);
+
+    close();
+  }
+
+  public void testNearPhraseQuery() throws Exception {
+
+    insertDocs(analyzer, "pease porridge rather hot and pease porridge fairly cold");
+
+    Query firstQ = new OrderedNearQuery(4, termQuery("pease"), termQuery("porridge"), termQuery("hot"));
+    {
+      String frags[] = doSearch(firstQ, Integer.MAX_VALUE);
+      assertEquals("<B>pease</B> <B>porridge</B> rather <B>hot</B> and pease porridge fairly cold", frags[0]);
+    }
+
+    // near.3(near.4(pease, porridge, hot), near.4(pease, porridge, cold))
+    Query q = new OrderedNearQuery(3,
+                firstQ,
+                new OrderedNearQuery(4, termQuery("pease"), termQuery("porridge"), termQuery("cold")));
+
+    String frags[] = doSearch(q, Integer.MAX_VALUE);
+    assertEquals("<B>pease</B> <B>porridge</B> rather <B>hot</B> and <B>pease</B> <B>porridge</B> fairly <B>cold</B>",
+                 frags[0]);
+
+    close();
+  }
+
+  private Term[] terms(String field, String...tokens) {
+      Term[] terms = new Term[tokens.length];
+      for (int i = 0; i < tokens.length; i++) {
+        terms[i] = new Term(field, tokens[i]);
+      }
+      return terms;
+    }
+
+  public void testSloppyPhraseQuery() throws Exception {
+    assertSloppyPhrase( "a b c d a b c d e f", "a b <B>c</B> d <B>a</B> b c d e f", 2, "c", "a");
+    assertSloppyPhrase( "a c e b d e f a b","<B>a</B> c e <B>b</B> d e f <B>a</B> <B>b</B>", 2, "a", "b");
+    assertSloppyPhrase( "Y A X B A", "Y <B>A</B> <B>X</B> B <B>A</B>", 2, "X", "A", "A");
+
+    assertSloppyPhrase( "X A X B A","<B>X</B> <B>A</B> X B <B>A</B>", 2, "X", "A", "A"); // non overlapping minmal!!
+    assertSloppyPhrase( "A A A X",null, 2, "X", "A", "A");
+    assertSloppyPhrase( "A A X A",  "A <B>A</B> <B>X</B> <B>A</B>", 2, "X", "A", "A");
+    assertSloppyPhrase( "A A X A Y B A", "A <B>A</B> <B>X</B> <B>A</B> Y B <B>A</B>", 2, "X", "A", "A");
+    assertSloppyPhrase( "A A X", null, 2, "X", "A", "A");
+    assertSloppyPhrase( "A X A", null, 1, "X", "A", "A");
+
+    assertSloppyPhrase( "A X B A", "<B>A</B> <B>X</B> B <B>A</B>", 2, "X", "A", "A");
+    assertSloppyPhrase( "A A X A X B A X B B A A X B A A", "A <B>A</B> <B>X</B> <B>A</B> <B>X</B> B <B>A</B> <B>X</B> B B <B>A</B> <B>A</B> <B>X</B> B <B>A</B> <B>A</B>", 2, "X", "A", "A");
+    assertSloppyPhrase( "A A X A X B A X B B A A X B A A", "A <B>A</B> <B>X</B> <B>A</B> <B>X</B> B <B>A</B> <B>X</B> B B <B>A</B> <B>A</B> <B>X</B> B <B>A</B> <B>A</B>", 2, "X", "A", "A");
+
+    assertSloppyPhrase( "A A X A X B A", "A <B>A</B> <B>X</B> <B>A</B> <B>X</B> B <B>A</B>", 2, "X", "A", "A");
+    assertSloppyPhrase( "A A Y A X B A", "A A Y <B>A</B> <B>X</B> B <B>A</B>", 2, "X", "A", "A");
+    assertSloppyPhrase( "A A Y A X B A A", "A A Y <B>A</B> <B>X</B> B <B>A</B> <B>A</B>", 2, "X", "A", "A");
+    assertSloppyPhrase( "A A X A Y B A", null , 1, "X", "A", "A");
+    close();
+  }
+
+
+  private void assertSloppyPhrase(String doc, String expected, int slop, String...query) throws Exception {
+    insertDocs(analyzer, doc);
+    PhraseQuery pq = new PhraseQuery();
+    for (String string : query) {
+      pq.add(new Term(F, string));  
+    }
+    
+    pq.setSlop(slop);
+//    System.out.println(doc);
+    String[] frags = doSearch(pq, 50);
+    if (expected == null) {
+      assertNull(frags != null ? frags[0] : "", frags);
+    } else {
+      assertEquals(expected, frags[0]);
+    }
+  }
+  
+  public static class BlockPositionIteratorFilter implements IntervalFilter {
+
+    @Override
+    public IntervalIterator filter(boolean collectIntervals, IntervalIterator iter) {
+      return new BlockIntervalIterator(collectIntervals, iter);
+    }
+    
+  }
+  
+}
diff --git a/lucene/join/src/java/org/apache/lucene/search/join/TermsIncludingScoreQuery.java b/lucene/join/src/java/org/apache/lucene/search/join/TermsIncludingScoreQuery.java
index c239ce4..d9e3cb1 100644
--- a/lucene/join/src/java/org/apache/lucene/search/join/TermsIncludingScoreQuery.java
+++ b/lucene/join/src/java/org/apache/lucene/search/join/TermsIncludingScoreQuery.java
@@ -31,6 +31,7 @@
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.Scorer;
 import org.apache.lucene.search.Weight;
+import org.apache.lucene.search.intervals.IntervalIterator;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefHash;
@@ -131,7 +132,7 @@
 
       @Override
       public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
-        SVInnerScorer scorer = (SVInnerScorer) scorer(context, false, false, context.reader().getLiveDocs());
+        SVInnerScorer scorer = (SVInnerScorer) scorer(context, false, false, PostingFeatures.DOCS_AND_FREQS, context.reader().getLiveDocs());
         if (scorer != null) {
           if (scorer.advanceForExplainOnly(doc) == doc) {
             return scorer.explain();
@@ -155,8 +156,7 @@
         originalWeight.normalize(norm, topLevelBoost * TermsIncludingScoreQuery.this.getBoost());
       }
 
-      @Override
-      public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder, boolean topScorer, Bits acceptDocs) throws IOException {
+      public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder, boolean topScorer, PostingFeatures flagsß, Bits acceptDocs) throws IOException {
         Terms terms = context.reader().terms(field);
         if (terms == null) {
           return null;
@@ -278,6 +278,10 @@
       } while (docId != DocIdSetIterator.NO_MORE_DOCS);
       return docId;
     }
+    @Override
+    public IntervalIterator intervals(boolean collectIntervals) throws IOException {
+      throw new UnsupportedOperationException();
+    }
 
     @Override
     public int freq() {
@@ -376,6 +380,12 @@
     }
 
     @Override
+    public IntervalIterator intervals(boolean collectIntervals)
+        throws IOException {
+      return null;
+    }
+
+    @Override
     public long cost() {
       return cost;
     }
diff --git a/lucene/join/src/java/org/apache/lucene/search/join/ToChildBlockJoinQuery.java b/lucene/join/src/java/org/apache/lucene/search/join/ToChildBlockJoinQuery.java
index 272458d..196879a 100644
--- a/lucene/join/src/java/org/apache/lucene/search/join/ToChildBlockJoinQuery.java
+++ b/lucene/join/src/java/org/apache/lucene/search/join/ToChildBlockJoinQuery.java
@@ -34,6 +34,7 @@
 import org.apache.lucene.search.Scorer;
 import org.apache.lucene.search.Scorer.ChildScorer;
 import org.apache.lucene.search.Weight;
+import org.apache.lucene.search.intervals.IntervalIterator;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.FixedBitSet;
 
@@ -122,10 +123,10 @@
     // child document space
     @Override
     public Scorer scorer(AtomicReaderContext readerContext, boolean scoreDocsInOrder,
-        boolean topScorer, Bits acceptDocs) throws IOException {
+        boolean topScorer, PostingFeatures flags, Bits acceptDocs) throws IOException {
 
       // Pass scoreDocsInOrder true, topScorer false to our sub:
-      final Scorer parentScorer = parentWeight.scorer(readerContext, true, false, null);
+      final Scorer parentScorer = parentWeight.scorer(readerContext, true, false, flags, null);
 
       if (parentScorer == null) {
         // No matches
@@ -304,6 +305,11 @@
     }
 
     @Override
+    public IntervalIterator intervals(boolean collectIntervals) throws IOException {
+      return parentScorer.intervals(collectIntervals);
+    }
+
+    @Override
     public long cost() {
       return parentScorer.cost();
     }
diff --git a/lucene/join/src/java/org/apache/lucene/search/join/ToParentBlockJoinCollector.java b/lucene/join/src/java/org/apache/lucene/search/join/ToParentBlockJoinCollector.java
index 5e68f5e..b53bd98 100644
--- a/lucene/join/src/java/org/apache/lucene/search/join/ToParentBlockJoinCollector.java
+++ b/lucene/join/src/java/org/apache/lucene/search/join/ToParentBlockJoinCollector.java
@@ -23,6 +23,7 @@
 import org.apache.lucene.search.Scorer.ChildScorer;
 import org.apache.lucene.search.grouping.GroupDocs;
 import org.apache.lucene.search.grouping.TopGroups;
+import org.apache.lucene.search.intervals.IntervalIterator;
 import org.apache.lucene.util.ArrayUtil;
 
 import java.io.IOException;
@@ -360,6 +361,11 @@
     }
 
     @Override
+    public IntervalIterator intervals(boolean collectIntervals) throws IOException {
+      throw new UnsupportedOperationException();
+    }
+
+    @Override
     public long cost() {
       return 1;
     }
diff --git a/lucene/join/src/java/org/apache/lucene/search/join/ToParentBlockJoinQuery.java b/lucene/join/src/java/org/apache/lucene/search/join/ToParentBlockJoinQuery.java
index cd27fa0..03c21a6 100644
--- a/lucene/join/src/java/org/apache/lucene/search/join/ToParentBlockJoinQuery.java
+++ b/lucene/join/src/java/org/apache/lucene/search/join/ToParentBlockJoinQuery.java
@@ -31,6 +31,7 @@
 import org.apache.lucene.search.Scorer;
 import org.apache.lucene.search.Weight;
 import org.apache.lucene.search.grouping.TopGroups;
+import org.apache.lucene.search.intervals.IntervalIterator;
 import org.apache.lucene.util.ArrayUtil;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.FixedBitSet;
@@ -159,10 +160,10 @@
     // parent document space
     @Override
     public Scorer scorer(AtomicReaderContext readerContext, boolean scoreDocsInOrder,
-        boolean topScorer, Bits acceptDocs) throws IOException {
+        boolean topScorer, PostingFeatures flags, Bits acceptDocs) throws IOException {
 
-      // Pass scoreDocsInOrder true, topScorer false to our sub and the live docs:
-      final Scorer childScorer = childWeight.scorer(readerContext, true, false, readerContext.reader().getLiveDocs());
+      // Pass scoreDocsInOrder true, topScorer false to our sub:
+      final Scorer childScorer = childWeight.scorer(readerContext, true, false, flags, null);
 
       if (childScorer == null) {
         // No matches
@@ -195,7 +196,7 @@
 
     @Override
     public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
-      BlockJoinScorer scorer = (BlockJoinScorer) scorer(context, true, false, context.reader().getLiveDocs());
+      BlockJoinScorer scorer = (BlockJoinScorer) scorer(context, true, false, PostingFeatures.DOCS_AND_FREQS, context.reader().getLiveDocs());
       if (scorer != null && scorer.advance(doc) == doc) {
         return scorer.explain(context.docBase);
       }
@@ -429,6 +430,11 @@
     }
 
     @Override
+    public IntervalIterator intervals(boolean collectIntervals) throws IOException {
+      throw new UnsupportedOperationException();
+    }
+
+    @Override
     public long cost() {
       return childScorer.cost();
     }
diff --git a/lucene/join/src/test/org/apache/lucene/search/join/TestBlockJoin.java b/lucene/join/src/test/org/apache/lucene/search/join/TestBlockJoin.java
index 76f25f4..b930642 100644
--- a/lucene/join/src/test/org/apache/lucene/search/join/TestBlockJoin.java
+++ b/lucene/join/src/test/org/apache/lucene/search/join/TestBlockJoin.java
@@ -22,6 +22,7 @@
 import org.apache.lucene.index.*;
 import org.apache.lucene.search.*;
 import org.apache.lucene.search.BooleanClause.Occur;
+import org.apache.lucene.search.Weight.PostingFeatures;
 import org.apache.lucene.search.grouping.GroupDocs;
 import org.apache.lucene.search.grouping.TopGroups;
 import org.apache.lucene.store.Directory;
@@ -1078,7 +1079,7 @@
 
     ToParentBlockJoinQuery q = new ToParentBlockJoinQuery(tq, parentFilter, ScoreMode.Avg);
     Weight weight = s.createNormalizedWeight(q);
-    DocIdSetIterator disi = weight.scorer(s.getIndexReader().leaves().get(0), true, true, null);
+    DocIdSetIterator disi = weight.scorer(s.getIndexReader().leaves().get(0), true, true, PostingFeatures.DOCS_AND_FREQS, null);
     assertEquals(1, disi.advance(1));
     r.close();
     dir.close();
@@ -1112,7 +1113,7 @@
 
     ToParentBlockJoinQuery q = new ToParentBlockJoinQuery(tq, parentFilter, ScoreMode.Avg);
     Weight weight = s.createNormalizedWeight(q);
-    DocIdSetIterator disi = weight.scorer(s.getIndexReader().leaves().get(0), true, true, null);
+    DocIdSetIterator disi = weight.scorer(s.getIndexReader().leaves().get(0), true, true, PostingFeatures.DOCS_AND_FREQS, null);
     assertEquals(2, disi.advance(0));
     r.close();
     dir.close();
diff --git a/lucene/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java b/lucene/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java
index ff2b1c5..ab80c35 100644
--- a/lucene/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java
+++ b/lucene/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java
@@ -17,15 +17,6 @@
  * limitations under the License.
  */
 
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Comparator;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.NoSuchElementException;
-
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
@@ -35,10 +26,9 @@
 import org.apache.lucene.index.AtomicReader;
 import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.BinaryDocValues;
-import org.apache.lucene.index.DocsAndPositionsEnum;
 import org.apache.lucene.index.DocsEnum;
-import org.apache.lucene.index.FieldInfo.IndexOptions;
 import org.apache.lucene.index.FieldInfo;
+import org.apache.lucene.index.FieldInfo.IndexOptions;
 import org.apache.lucene.index.FieldInfos;
 import org.apache.lucene.index.FieldInvertState;
 import org.apache.lucene.index.Fields;
@@ -55,21 +45,29 @@
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.Scorer;
 import org.apache.lucene.search.similarities.Similarity;
-import org.apache.lucene.store.RAMDirectory; // for javadocs
+import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.util.ArrayUtil;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.ByteBlockPool;
 import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.BytesRefHash.DirectBytesStartArray;
 import org.apache.lucene.util.BytesRefHash;
+import org.apache.lucene.util.BytesRefHash.DirectBytesStartArray;
 import org.apache.lucene.util.Counter;
+import org.apache.lucene.util.IntBlockPool;
 import org.apache.lucene.util.IntBlockPool.SliceReader;
 import org.apache.lucene.util.IntBlockPool.SliceWriter;
-import org.apache.lucene.util.IntBlockPool;
 import org.apache.lucene.util.RamUsageEstimator;
 import org.apache.lucene.util.RecyclingByteBlockAllocator;
 import org.apache.lucene.util.RecyclingIntBlockAllocator;
 
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.NoSuchElementException;
 
 /**
  * High-performance single-document main memory Apache Lucene fulltext search index. 
@@ -957,7 +955,7 @@
       }
 
       @Override
-      public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) {
+      public DocsEnum docsAndPositions(Bits liveDocs, DocsEnum reuse, int flags) {
         if (reuse == null || !(reuse instanceof MemoryDocsAndPositionsEnum)) {
           reuse = new MemoryDocsAndPositionsEnum();
         }
@@ -1024,7 +1022,7 @@
       }
     }
     
-    private class MemoryDocsAndPositionsEnum extends DocsAndPositionsEnum {
+    private class MemoryDocsAndPositionsEnum extends DocsEnum {
       private int posUpto; // for assert
       private boolean hasNext;
       private Bits liveDocs;
@@ -1038,7 +1036,7 @@
         this.sliceReader = new SliceReader(intBlockPool);
       }
 
-      public DocsAndPositionsEnum reset(Bits liveDocs, int start, int end, int freq) {
+      public DocsEnum reset(Bits liveDocs, int start, int end, int freq) {
         this.liveDocs = liveDocs;
         this.sliceReader.reset(start, end);
         posUpto = 0; // for assert
@@ -1076,7 +1074,9 @@
 
       @Override
       public int nextPosition() {
-        assert posUpto++ < freq;
+        //assert posUpto++ < freq;
+        if (posUpto++ >= freq)
+          return NO_MORE_POSITIONS;
         assert !sliceReader.endOfSlice() : " stores offsets : " + startOffset;
         if (storeOffsets) {
           int pos = sliceReader.readInt();
diff --git a/lucene/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java b/lucene/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java
index bf35bb8..ce66d26 100644
--- a/lucene/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java
+++ b/lucene/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java
@@ -42,7 +42,7 @@
 import org.apache.lucene.index.AtomicReader;
 import org.apache.lucene.index.CompositeReader;
 import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.index.DocsEnum;
 import org.apache.lucene.index.DocsEnum;
 import org.apache.lucene.index.Fields;
 import org.apache.lucene.index.IndexReader;
@@ -204,9 +204,9 @@
           while(iwTermsIter.next() != null) {
             assertNotNull(memTermsIter.next());
             assertEquals(iwTermsIter.term(), memTermsIter.term());
-            DocsAndPositionsEnum iwDocsAndPos = iwTermsIter.docsAndPositions(null, null);
-            DocsAndPositionsEnum memDocsAndPos = memTermsIter.docsAndPositions(null, null);
-            while(iwDocsAndPos.nextDoc() != DocsAndPositionsEnum.NO_MORE_DOCS) {
+            DocsEnum iwDocsAndPos = iwTermsIter.docsAndPositions(null, null);
+            DocsEnum memDocsAndPos = memTermsIter.docsAndPositions(null, null);
+            while(iwDocsAndPos.nextDoc() != DocsEnum.NO_MORE_DOCS) {
               assertEquals(iwDocsAndPos.docID(), memDocsAndPos.nextDoc());
               assertEquals(iwDocsAndPos.freq(), memDocsAndPos.freq());
               for (int i = 0; i < iwDocsAndPos.freq(); i++) {
@@ -225,7 +225,7 @@
             assertEquals(iwTermsIter.term(), memTermsIter.term());
             DocsEnum iwDocsAndPos = iwTermsIter.docs(null, null);
             DocsEnum memDocsAndPos = memTermsIter.docs(null, null);
-            while(iwDocsAndPos.nextDoc() != DocsAndPositionsEnum.NO_MORE_DOCS) {
+            while(iwDocsAndPos.nextDoc() != DocsEnum.NO_MORE_DOCS) {
               assertEquals(iwDocsAndPos.docID(), memDocsAndPos.nextDoc());
               assertEquals(iwDocsAndPos.freq(), memDocsAndPos.freq());
             }
@@ -351,7 +351,7 @@
       memory.addField("foo", "bar", analyzer);
       AtomicReader reader = (AtomicReader) memory.createSearcher().getIndexReader();
       assertEquals(1, reader.terms("foo").getSumTotalTermFreq());
-      DocsAndPositionsEnum disi = reader.termPositionsEnum(new Term("foo", "bar"));
+      DocsEnum disi = reader.termPositionsEnum(new Term("foo", "bar"));
       int docid = disi.docID();
       assertEquals(-1, docid);
       assertTrue(disi.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
diff --git a/lucene/misc/src/java/org/apache/lucene/index/sorter/SortingAtomicReader.java b/lucene/misc/src/java/org/apache/lucene/index/sorter/SortingAtomicReader.java
index 469357d..205cd50 100644
--- a/lucene/misc/src/java/org/apache/lucene/index/sorter/SortingAtomicReader.java
+++ b/lucene/misc/src/java/org/apache/lucene/index/sorter/SortingAtomicReader.java
@@ -17,12 +17,8 @@
  * limitations under the License.
  */
 
-import java.io.IOException;
-import java.util.Arrays;
-
 import org.apache.lucene.index.AtomicReader;
 import org.apache.lucene.index.BinaryDocValues;
-import org.apache.lucene.index.DocsAndPositionsEnum;
 import org.apache.lucene.index.DocsEnum;
 import org.apache.lucene.index.FieldInfo.IndexOptions;
 import org.apache.lucene.index.FieldInfos;
@@ -35,17 +31,14 @@
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.search.DocIdSetIterator;
-import org.apache.lucene.store.IndexInput;
-import org.apache.lucene.store.IndexOutput;
-import org.apache.lucene.store.RAMFile;
-import org.apache.lucene.store.RAMInputStream;
-import org.apache.lucene.store.RAMOutputStream;
 import org.apache.lucene.util.ArrayUtil;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.TimSorter;
 import org.apache.lucene.util.automaton.CompiledAutomaton;
 
+import java.io.IOException;
+
 /**
  * An {@link AtomicReader} which supports sorting documents by a given
  * {@link Sorter}. You can use this class to sort an index as follows:
@@ -161,30 +154,8 @@
     }
 
     @Override
-    public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, final int flags) throws IOException {
-      final DocsAndPositionsEnum inReuse;
-      final SortingDocsAndPositionsEnum wrapReuse;
-      if (reuse != null && reuse instanceof SortingDocsAndPositionsEnum) {
-        // if we're asked to reuse the given DocsEnum and it is Sorting, return
-        // the wrapped one, since some Codecs expect it.
-        wrapReuse = (SortingDocsAndPositionsEnum) reuse;
-        inReuse = wrapReuse.getWrapped();
-      } else {
-        wrapReuse = null;
-        inReuse = reuse;
-      }
-
-      final DocsAndPositionsEnum inDocsAndPositions = in.docsAndPositions(newToOld(liveDocs), inReuse, flags);
-      if (inDocsAndPositions == null) {
-        return null;
-      }
-
-      // we ignore the fact that offsets may be stored but not asked for,
-      // since this code is expected to be used during addIndexes which will
-      // ask for everything. if that assumption changes in the future, we can
-      // factor in whether 'flags' says offsets are not required.
-      final boolean storeOffsets = indexOptions.compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0;
-      return new SortingDocsAndPositionsEnum(docMap.size(), wrapReuse, inDocsAndPositions, docMap, storeOffsets);
+    public DocsEnum docsAndPositions(Bits liveDocs, DocsEnum reuse, final int flags) throws IOException {
+      return docs(liveDocs, reuse, flags);
     }
 
   }
@@ -476,236 +447,6 @@
       return in;
     }
   }
-  
-  static class SortingDocsAndPositionsEnum extends FilterDocsAndPositionsEnum {
-    
-    /**
-     * A {@link Sorter} which sorts two parallel arrays of doc IDs and
-     * offsets in one go. Everytime a doc ID is 'swapped', its correponding offset
-     * is swapped too.
-     */
-    private static final class DocOffsetSorter extends TimSorter {
-      
-      private int[] docs;
-      private long[] offsets;
-      private final int[] tmpDocs;
-      private final long[] tmpOffsets;
-      
-      public DocOffsetSorter(int maxDoc) {
-        super(maxDoc / 64);
-        this.tmpDocs = new int[maxDoc / 64];
-        this.tmpOffsets = new long[maxDoc / 64];
-      }
-
-      public void reset(int[] docs, long[] offsets) {
-        this.docs = docs;
-        this.offsets = offsets;
-      }
-
-      @Override
-      protected int compare(int i, int j) {
-        return docs[i] - docs[j];
-      }
-      
-      @Override
-      protected void swap(int i, int j) {
-        int tmpDoc = docs[i];
-        docs[i] = docs[j];
-        docs[j] = tmpDoc;
-        
-        long tmpOffset = offsets[i];
-        offsets[i] = offsets[j];
-        offsets[j] = tmpOffset;
-      }
-
-      @Override
-      protected void copy(int src, int dest) {
-        docs[dest] = docs[src];
-        offsets[dest] = offsets[src];
-      }
-
-      @Override
-      protected void save(int i, int len) {
-        System.arraycopy(docs, i, tmpDocs, 0, len);
-        System.arraycopy(offsets, i, tmpOffsets, 0, len);
-      }
-
-      @Override
-      protected void restore(int i, int j) {
-        docs[j] = tmpDocs[i];
-        offsets[j] = tmpOffsets[i];
-      }
-
-      @Override
-      protected int compareSaved(int i, int j) {
-        return tmpDocs[i] - docs[j];
-      }
-    }
-    
-    private final int maxDoc;
-    private final DocOffsetSorter sorter;
-    private int[] docs;
-    private long[] offsets;
-    private final int upto;
-    
-    private final IndexInput postingInput;
-    private final boolean storeOffsets;
-    
-    private int docIt = -1;
-    private int pos;
-    private int startOffset = -1;
-    private int endOffset = -1;
-    private final BytesRef payload;
-    private int currFreq;
-
-    private final RAMFile file;
-
-    SortingDocsAndPositionsEnum(int maxDoc, SortingDocsAndPositionsEnum reuse, final DocsAndPositionsEnum in, Sorter.DocMap docMap, boolean storeOffsets) throws IOException {
-      super(in);
-      this.maxDoc = maxDoc;
-      this.storeOffsets = storeOffsets;
-      if (reuse != null) {
-        docs = reuse.docs;
-        offsets = reuse.offsets;
-        payload = reuse.payload;
-        file = reuse.file;
-        if (reuse.maxDoc == maxDoc) {
-          sorter = reuse.sorter;
-        } else {
-          sorter = new DocOffsetSorter(maxDoc);
-        }
-      } else {
-        docs = new int[32];
-        offsets = new long[32];
-        payload = new BytesRef(32);
-        file = new RAMFile();
-        sorter = new DocOffsetSorter(maxDoc);
-      }
-      final IndexOutput out = new RAMOutputStream(file);
-      int doc;
-      int i = 0;
-      while ((doc = in.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
-        if (i == docs.length) {
-          final int newLength = ArrayUtil.oversize(i + 1, 4);
-          docs = Arrays.copyOf(docs, newLength);
-          offsets = Arrays.copyOf(offsets, newLength);
-        }
-        docs[i] = docMap.oldToNew(doc);
-        offsets[i] = out.getFilePointer();
-        addPositions(in, out);
-        i++;
-      }
-      upto = i;
-      sorter.reset(docs, offsets);
-      sorter.sort(0, upto);
-      out.close();
-      this.postingInput = new RAMInputStream("", file);
-    }
-
-    // for testing
-    boolean reused(DocsAndPositionsEnum other) {
-      if (other == null || !(other instanceof SortingDocsAndPositionsEnum)) {
-        return false;
-      }
-      return docs == ((SortingDocsAndPositionsEnum) other).docs;
-    }
-
-    private void addPositions(final DocsAndPositionsEnum in, final IndexOutput out) throws IOException {
-      int freq = in.freq();
-      out.writeVInt(freq);
-      int previousPosition = 0;
-      int previousEndOffset = 0;
-      for (int i = 0; i < freq; i++) {
-        final int pos = in.nextPosition();
-        final BytesRef payload = in.getPayload();
-        // The low-order bit of token is set only if there is a payload, the
-        // previous bits are the delta-encoded position. 
-        final int token = (pos - previousPosition) << 1 | (payload == null ? 0 : 1);
-        out.writeVInt(token);
-        previousPosition = pos;
-        if (storeOffsets) { // don't encode offsets if they are not stored
-          final int startOffset = in.startOffset();
-          final int endOffset = in.endOffset();
-          out.writeVInt(startOffset - previousEndOffset);
-          out.writeVInt(endOffset - startOffset);
-          previousEndOffset = endOffset;
-        }
-        if (payload != null) {
-          out.writeVInt(payload.length);
-          out.writeBytes(payload.bytes, payload.offset, payload.length);
-        }
-      }
-    }
-    
-    @Override
-    public int advance(final int target) throws IOException {
-      // need to support it for checkIndex, but in practice it won't be called, so
-      // don't bother to implement efficiently for now.
-      return slowAdvance(target);
-    }
-    
-    @Override
-    public int docID() {
-      return docIt < 0 ? -1 : docIt >= upto ? NO_MORE_DOCS : docs[docIt];
-    }
-    
-    @Override
-    public int endOffset() throws IOException {
-      return endOffset;
-    }
-    
-    @Override
-    public int freq() throws IOException {
-      return currFreq;
-    }
-    
-    @Override
-    public BytesRef getPayload() throws IOException {
-      return payload.length == 0 ? null : payload;
-    }
-    
-    @Override
-    public int nextDoc() throws IOException {
-      if (++docIt >= upto) return DocIdSetIterator.NO_MORE_DOCS;
-      postingInput.seek(offsets[docIt]);
-      currFreq = postingInput.readVInt();
-      // reset variables used in nextPosition
-      pos = 0;
-      endOffset = 0;
-      return docs[docIt];
-    }
-    
-    @Override
-    public int nextPosition() throws IOException {
-      final int token = postingInput.readVInt();
-      pos += token >>> 1;
-      if (storeOffsets) {
-        startOffset = endOffset + postingInput.readVInt();
-        endOffset = startOffset + postingInput.readVInt();
-      }
-      if ((token & 1) != 0) {
-        payload.offset = 0;
-        payload.length = postingInput.readVInt();
-        if (payload.length > payload.bytes.length) {
-          payload.bytes = new byte[ArrayUtil.oversize(payload.length, 1)];
-        }
-        postingInput.readBytes(payload.bytes, 0, payload.length);
-      } else {
-        payload.length = 0;
-      }
-      return pos;
-    }
-    
-    @Override
-    public int startOffset() throws IOException {
-      return startOffset;
-    }
-
-    /** Returns the wrapped {@link DocsAndPositionsEnum}. */
-    DocsAndPositionsEnum getWrapped() {
-      return in;
-    }
-  }
 
   /** Return a sorted view of <code>reader</code> according to the order
    *  defined by <code>sorter</code>. If the reader is already sorted, this
diff --git a/lucene/misc/src/test/org/apache/lucene/index/sorter/SorterTestBase.java b/lucene/misc/src/test/org/apache/lucene/index/sorter/SorterTestBase.java
index e12f528..4a06bb0 100644
--- a/lucene/misc/src/test/org/apache/lucene/index/sorter/SorterTestBase.java
+++ b/lucene/misc/src/test/org/apache/lucene/index/sorter/SorterTestBase.java
@@ -17,13 +17,6 @@
  * limitations under the License.
  */
 
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.List;
-import java.util.Random;
-
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
@@ -43,7 +36,6 @@
 import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.BinaryDocValues;
 import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.index.DocsAndPositionsEnum;
 import org.apache.lucene.index.DocsEnum;
 import org.apache.lucene.index.FieldInfo.IndexOptions;
 import org.apache.lucene.index.FieldInvertState;
@@ -57,7 +49,6 @@
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.index.TermsEnum.SeekStatus;
-import org.apache.lucene.index.sorter.SortingAtomicReader.SortingDocsAndPositionsEnum;
 import org.apache.lucene.index.sorter.SortingAtomicReader.SortingDocsEnum;
 import org.apache.lucene.search.CollectionStatistics;
 import org.apache.lucene.search.DocIdSetIterator;
@@ -73,6 +64,13 @@
 import org.junit.BeforeClass;
 import org.junit.Test;
 
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.Random;
+
 public abstract class SorterTestBase extends LuceneTestCase {
 
   static final class NormsSimilarity extends Similarity {
@@ -255,7 +253,7 @@
   public void testDocsAndPositionsEnum() throws Exception {
     TermsEnum termsEnum = reader.terms(DOC_POSITIONS_FIELD).iterator(null);
     assertEquals(SeekStatus.FOUND, termsEnum.seekCeil(new BytesRef(DOC_POSITIONS_TERM)));
-    DocsAndPositionsEnum sortedPositions = termsEnum.docsAndPositions(null, null);
+    DocsEnum sortedPositions = termsEnum.docsAndPositions(null, null);
     int doc;
     
     // test nextDoc()
@@ -273,10 +271,10 @@
     }
     
     // test advance()
-    final DocsAndPositionsEnum reuse = sortedPositions;
+    final DocsEnum reuse = sortedPositions;
     sortedPositions = termsEnum.docsAndPositions(null, reuse);
-    if (sortedPositions instanceof SortingDocsAndPositionsEnum) {
-      assertTrue(((SortingDocsAndPositionsEnum) sortedPositions).reused(reuse)); // make sure reuse worked
+    if (sortedPositions instanceof SortingDocsEnum) {
+      assertTrue(((SortingDocsEnum) sortedPositions).reused(reuse)); // make sure reuse worked
     }
     doc = 0;
     while ((doc = sortedPositions.advance(doc + _TestUtil.nextInt(random(), 1, 5))) != DocIdSetIterator.NO_MORE_DOCS) {
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/CustomScoreQuery.java b/lucene/queries/src/java/org/apache/lucene/queries/CustomScoreQuery.java
index 602fa8b..8f4405c 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/CustomScoreQuery.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/CustomScoreQuery.java
@@ -17,12 +17,6 @@
  * limitations under the License.
  */
 
-import java.io.IOException;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Set;
-import java.util.Arrays;
-
 import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.Term;
@@ -30,13 +24,20 @@
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.search.ComplexExplanation;
 import org.apache.lucene.search.Explanation;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.search.Weight;
-import org.apache.lucene.search.Scorer;
 import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.Weight;
+import org.apache.lucene.search.intervals.IntervalIterator;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.ToStringUtils;
 
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Set;
+
 /**
  * Query that sets document score as a programmatic function of several (sub) scores:
  * <ol>
@@ -235,19 +236,19 @@
 
     @Override
     public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder,
-        boolean topScorer, Bits acceptDocs) throws IOException {
+        boolean topScorer, PostingFeatures flags, Bits acceptDocs) throws IOException {
       // Pass true for "scoresDocsInOrder", because we
       // require in-order scoring, even if caller does not,
       // since we call advance on the valSrcScorers.  Pass
       // false for "topScorer" because we will not invoke
       // score(Collector) on these scorers:
-      Scorer subQueryScorer = subQueryWeight.scorer(context, true, false, acceptDocs);
+      Scorer subQueryScorer = subQueryWeight.scorer(context, true, false, flags, acceptDocs);
       if (subQueryScorer == null) {
         return null;
       }
       Scorer[] valSrcScorers = new Scorer[valSrcWeights.length];
       for(int i = 0; i < valSrcScorers.length; i++) {
-         valSrcScorers[i] = valSrcWeights[i].scorer(context, true, topScorer, acceptDocs);
+         valSrcScorers[i] = valSrcWeights[i].scorer(context, true, topScorer, flags, acceptDocs);
       }
       return new CustomScorer(CustomScoreQuery.this.getCustomScoreProvider(context), this, queryWeight, subQueryScorer, valSrcScorers);
     }
@@ -355,6 +356,11 @@
     }
 
     @Override
+    public IntervalIterator intervals(boolean collectIntervals) throws IOException {
+      return subQueryScorer.intervals(collectIntervals);
+    }
+
+    @Override
     public long cost() {
       return subQueryScorer.cost();
     }
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/function/BoostedQuery.java b/lucene/queries/src/java/org/apache/lucene/queries/function/BoostedQuery.java
index ba3163e..570f31d 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/function/BoostedQuery.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/function/BoostedQuery.java
@@ -18,6 +18,7 @@
  */
 
 import org.apache.lucene.search.*;
+import org.apache.lucene.search.intervals.IntervalIterator;
 import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.Term;
@@ -98,9 +99,9 @@
 
     @Override
     public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder,
-        boolean topScorer, Bits acceptDocs) throws IOException {
+        boolean topScorer,  PostingFeatures flags, Bits acceptDocs) throws IOException {
       // we are gonna advance() the subscorer
-      Scorer subQueryScorer = qWeight.scorer(context, true, false, acceptDocs);
+      Scorer subQueryScorer = qWeight.scorer(context, true, false, flags, acceptDocs);
       if(subQueryScorer == null) {
         return null;
       }
@@ -190,6 +191,11 @@
     }
 
     @Override
+    public IntervalIterator intervals(boolean collectIntervals) throws IOException {
+      return scorer.intervals(collectIntervals);
+    }
+
+    @Override
     public long cost() {
       return scorer.cost();
     }
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/function/FunctionQuery.java b/lucene/queries/src/java/org/apache/lucene/queries/function/FunctionQuery.java
index 6c44b05..8f99cf6 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/function/FunctionQuery.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/function/FunctionQuery.java
@@ -21,6 +21,7 @@
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.*;
+import org.apache.lucene.search.intervals.IntervalIterator;
 import org.apache.lucene.index.MultiFields;
 import org.apache.lucene.util.Bits;
 
@@ -91,13 +92,13 @@
 
     @Override
     public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder,
-        boolean topScorer, Bits acceptDocs) throws IOException {
+        boolean topScorer,  PostingFeatures flags, Bits acceptDocs) throws IOException {
       return new AllScorer(context, acceptDocs, this, queryWeight);
     }
 
     @Override
     public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
-      return ((AllScorer)scorer(context, true, true, context.reader().getLiveDocs())).explain(doc);
+      return ((AllScorer)scorer(context, true, true, PostingFeatures.DOCS_AND_FREQS, context.reader().getLiveDocs())).explain(doc);
     }
   }
 
@@ -179,6 +180,11 @@
       result.addDetail(new Explanation(weight.queryNorm,"queryNorm"));
       return result;
     }
+
+    @Override
+    public IntervalIterator intervals(boolean collectIntervals) throws IOException {
+      throw new UnsupportedOperationException("AllScorer doesn't support interval iterators.");
+    }
   }
 
 
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/function/ValueSourceScorer.java b/lucene/queries/src/java/org/apache/lucene/queries/function/ValueSourceScorer.java
index ec8aced..32af534 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/function/ValueSourceScorer.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/function/ValueSourceScorer.java
@@ -20,6 +20,7 @@
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.MultiFields;
 import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.intervals.IntervalIterator;
 import org.apache.lucene.util.Bits;
 
 import java.io.IOException;
@@ -88,6 +89,11 @@
   }
 
   @Override
+  public IntervalIterator intervals(boolean collectIntervals) throws IOException {    
+    throw new UnsupportedOperationException("ValueSourceScorer doesn't support interval iterators.");
+  }
+
+  @Override
   public int freq() throws IOException {
     return 1;
   }
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/QueryValueSource.java b/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/QueryValueSource.java
index 3e22e8e..c0ebabf 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/QueryValueSource.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/QueryValueSource.java
@@ -23,6 +23,7 @@
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.queries.function.docvalues.FloatDocValues;
 import org.apache.lucene.search.*;
+import org.apache.lucene.search.Weight.PostingFeatures;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.mutable.MutableValue;
 import org.apache.lucene.util.mutable.MutableValueFloat;
@@ -123,7 +124,7 @@
     try {
       if (doc < lastDocRequested) {
         if (noMatches) return defVal;
-        scorer = weight.scorer(readerContext, true, false, acceptDocs);
+        scorer = weight.scorer(readerContext, true, false, PostingFeatures.DOCS_AND_FREQS, acceptDocs);
         if (scorer==null) {
           noMatches = true;
           return defVal;
@@ -154,7 +155,7 @@
     try {
       if (doc < lastDocRequested) {
         if (noMatches) return false;
-        scorer = weight.scorer(readerContext, true, false, acceptDocs);
+        scorer = weight.scorer(readerContext, true, false, PostingFeatures.DOCS_AND_FREQS, acceptDocs);
         scorerDoc = -1;
         if (scorer==null) {
           noMatches = true;
@@ -212,7 +213,7 @@
             mval.exists = false;
             return;
           }
-          scorer = weight.scorer(readerContext, true, false, acceptDocs);
+          scorer = weight.scorer(readerContext, true, false, PostingFeatures.DOCS_AND_FREQS, acceptDocs);
           scorerDoc = -1;
           if (scorer==null) {
             noMatches = true;
diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingPostingsFormat.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingPostingsFormat.java
index 8681b59..9465156 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingPostingsFormat.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingPostingsFormat.java
@@ -17,15 +17,11 @@
  * limitations under the License.
  */
 
-import java.io.IOException;
-import java.util.Iterator;
-
 import org.apache.lucene.codecs.FieldsConsumer;
 import org.apache.lucene.codecs.FieldsProducer;
 import org.apache.lucene.codecs.PostingsFormat;
 import org.apache.lucene.codecs.lucene41.Lucene41PostingsFormat;
 import org.apache.lucene.index.AssertingAtomicReader;
-import org.apache.lucene.index.DocsAndPositionsEnum;
 import org.apache.lucene.index.DocsEnum;
 import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.Fields;
@@ -35,6 +31,9 @@
 import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.util.BytesRef;
 
+import java.io.IOException;
+import java.util.Iterator;
+
 /**
  * Just like {@link Lucene41PostingsFormat} but with additional asserts.
  */
@@ -128,7 +127,6 @@
         termsEnum = terms.iterator(termsEnum);
         BytesRef lastTerm = null;
         DocsEnum docsEnum = null;
-        DocsAndPositionsEnum posEnum = null;
 
         boolean hasFreqs = fieldInfo.getIndexOptions().compareTo(FieldInfo.IndexOptions.DOCS_AND_FREQS) >= 0;
         boolean hasPositions = fieldInfo.getIndexOptions().compareTo(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0;
@@ -158,13 +156,12 @@
             docsEnum = termsEnum.docs(null, docsEnum, flags);
           } else {
             if (hasPayloads) {
-              flags |= DocsAndPositionsEnum.FLAG_PAYLOADS;
+              flags |= DocsEnum.FLAG_PAYLOADS;
             }
             if (hasOffsets) {
-              flags = flags | DocsAndPositionsEnum.FLAG_OFFSETS;
+              flags = flags | DocsEnum.FLAG_OFFSETS;
             }
-            posEnum = termsEnum.docsAndPositions(null, posEnum, flags);
-            docsEnum = posEnum;
+            docsEnum = termsEnum.docsAndPositions(null, docsEnum, flags);
           }
 
           assert docsEnum != null : "termsEnum=" + termsEnum + " hasPositions=" + hasPositions;
@@ -186,13 +183,13 @@
                 int lastPos = -1;
                 int lastStartOffset = -1;
                 for(int i=0;i<freq;i++) {
-                  int pos = posEnum.nextPosition();
+                  int pos = docsEnum.nextPosition();
                   assert pos >= lastPos: "pos=" + pos + " vs lastPos=" + lastPos + " i=" + i + " freq=" + freq;
                   lastPos = pos;
 
                   if (hasOffsets) {
-                    int startOffset = posEnum.startOffset();
-                    int endOffset = posEnum.endOffset();
+                    int startOffset = docsEnum.startOffset();
+                    int endOffset = docsEnum.endOffset();
                     assert endOffset >= startOffset;
                     assert startOffset >= lastStartOffset;
                     lastStartOffset = startOffset;
diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/ramonly/RAMOnlyPostingsFormat.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/ramonly/RAMOnlyPostingsFormat.java
index 9fcaab7..6c2fa98 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/codecs/ramonly/RAMOnlyPostingsFormat.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/ramonly/RAMOnlyPostingsFormat.java
@@ -17,26 +17,14 @@
  * limitations under the License.
  */
 
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.SortedMap;
-import java.util.TreeMap;
-import java.util.concurrent.atomic.AtomicInteger;
-
 import org.apache.lucene.codecs.CodecUtil;
 import org.apache.lucene.codecs.FieldsConsumer;
 import org.apache.lucene.codecs.FieldsProducer;
 import org.apache.lucene.codecs.PostingsFormat;
 import org.apache.lucene.codecs.TermStats;
-import org.apache.lucene.index.DocsAndPositionsEnum;
 import org.apache.lucene.index.DocsEnum;
-import org.apache.lucene.index.FieldInfo.IndexOptions;
 import org.apache.lucene.index.FieldInfo;
+import org.apache.lucene.index.FieldInfo.IndexOptions;
 import org.apache.lucene.index.Fields;
 import org.apache.lucene.index.IndexFileNames;
 import org.apache.lucene.index.SegmentReadState;
@@ -51,6 +39,17 @@
 import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.RamUsageEstimator;
 
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.SortedMap;
+import java.util.TreeMap;
+import java.util.concurrent.atomic.AtomicInteger;
+
 /** Stores all postings data in RAM, but writes a small
  *  token (header + single int) to identify which "slot" the
  *  index is using in RAM HashMap.
@@ -242,7 +241,6 @@
         long sumTotalTermFreq = 0;
         long sumDocFreq = 0;
         DocsEnum docsEnum = null;
-        DocsAndPositionsEnum posEnum = null;
         int enumFlags;
 
         IndexOptions indexOptions = fieldInfo.getIndexOptions();
@@ -257,15 +255,15 @@
           enumFlags = DocsEnum.FLAG_FREQS;
         } else if (writeOffsets == false) {
           if (writePayloads) {
-            enumFlags = DocsAndPositionsEnum.FLAG_PAYLOADS;
+            enumFlags = DocsEnum.FLAG_PAYLOADS;
           } else {
             enumFlags = 0;
           }
         } else {
           if (writePayloads) {
-            enumFlags = DocsAndPositionsEnum.FLAG_PAYLOADS | DocsAndPositionsEnum.FLAG_OFFSETS;
+            enumFlags = DocsEnum.FLAG_PAYLOADS | DocsEnum.FLAG_OFFSETS;
           } else {
-            enumFlags = DocsAndPositionsEnum.FLAG_OFFSETS;
+            enumFlags = DocsEnum.FLAG_OFFSETS;
           }
         }
 
@@ -275,14 +273,7 @@
             break;
           }
           RAMPostingsWriterImpl postingsWriter = termsConsumer.startTerm(term);
-
-          if (writePositions) {
-            posEnum = termsEnum.docsAndPositions(null, posEnum, enumFlags);
-            docsEnum = posEnum;
-          } else {
-            docsEnum = termsEnum.docs(null, docsEnum, enumFlags);
-            posEnum = null;
-          }
+          docsEnum = termsEnum.docs(null, docsEnum, enumFlags);
 
           int docFreq = 0;
           long totalTermFreq = 0;
@@ -305,13 +296,13 @@
             postingsWriter.startDoc(docID, freq);
             if (writePositions) {
               for (int i=0;i<freq;i++) {
-                int pos = posEnum.nextPosition();
-                BytesRef payload = writePayloads ? posEnum.getPayload() : null;
+                int pos = docsEnum.nextPosition();
+                BytesRef payload = writePayloads ? docsEnum.getPayload() : null;
                 int startOffset;
                 int endOffset;
                 if (writeOffsets) {
-                  startOffset = posEnum.startOffset();
-                  endOffset = posEnum.endOffset();
+                  startOffset = docsEnum.startOffset();
+                  endOffset = docsEnum.endOffset();
                 } else {
                   startOffset = -1;
                   endOffset = -1;
@@ -469,7 +460,7 @@
     }
 
     @Override
-    public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) {
+    public DocsEnum docsAndPositions(Bits liveDocs, DocsEnum reuse, int flags) {
       return new RAMDocsAndPositionsEnum(ramField.termToDocs.get(current), liveDocs);
     }
   }
@@ -524,7 +515,7 @@
     } 
   }
 
-  private static class RAMDocsAndPositionsEnum extends DocsAndPositionsEnum {
+  private static class RAMDocsAndPositionsEnum extends DocsEnum {
     private final RAMTerm ramTerm;
     private final Bits liveDocs;
     private RAMDoc current;
@@ -570,6 +561,8 @@
 
     @Override
     public int nextPosition() {
+      if (posUpto >= current.positions.length)
+        return NO_MORE_POSITIONS;
       return current.positions[posUpto++];
     }
 
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/AssertingAtomicReader.java b/lucene/test-framework/src/java/org/apache/lucene/index/AssertingAtomicReader.java
index 086cb21..ade0b79 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/AssertingAtomicReader.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/AssertingAtomicReader.java
@@ -1,13 +1,13 @@
 package org.apache.lucene.index;
 
-import java.io.IOException;
-import java.util.Iterator;
-
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.automaton.CompiledAutomaton;
 
+import java.io.IOException;
+import java.util.Iterator;
+
 /*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
@@ -125,16 +125,16 @@
     }
 
     @Override
-    public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) throws IOException {
+    public DocsEnum docsAndPositions(Bits liveDocs, DocsEnum reuse, int flags) throws IOException {
       assert state == State.POSITIONED: "docsAndPositions(...) called on unpositioned TermsEnum";
 
       // TODO: should we give this thing a random to be super-evil,
       // and randomly *not* unwrap?
-      if (reuse instanceof AssertingDocsAndPositionsEnum) {
-        reuse = ((AssertingDocsAndPositionsEnum) reuse).in;
+      if (reuse instanceof AssertingDocsEnum) {
+        reuse = ((AssertingDocsEnum) reuse).in;
       }
-      DocsAndPositionsEnum docs = super.docsAndPositions(liveDocs, reuse, flags);
-      return docs == null ? null : new AssertingDocsAndPositionsEnum(docs);
+      DocsEnum docs = super.docsAndPositions(liveDocs, reuse, flags);
+      return docs == null ? null : new AssertingDocsEnum(docs);
     }
 
     // TODO: we should separately track if we are 'at the end' ?
@@ -227,8 +227,10 @@
   /** Wraps a docsenum with additional checks */
   public static class AssertingDocsEnum extends FilterDocsEnum {
     private DocsEnumState state = DocsEnumState.START;
+    int positionCount = 0;
+    int positionMax = 0;
     private int doc;
-    
+
     public AssertingDocsEnum(DocsEnum in) {
       this(in, true);
     }
@@ -253,9 +255,12 @@
       assert nextDoc > doc : "backwards nextDoc from " + doc + " to " + nextDoc + " " + in;
       if (nextDoc == DocIdSetIterator.NO_MORE_DOCS) {
         state = DocsEnumState.FINISHED;
+        positionMax = 0;
       } else {
         state = DocsEnumState.ITERATING;
+        positionMax = super.freq();
       }
+      positionCount = 0;
       assert super.docID() == nextDoc;
       return doc = nextDoc;
     }
@@ -268,9 +273,12 @@
       assert advanced >= target : "backwards advance from: " + target + " to: " + advanced;
       if (advanced == DocIdSetIterator.NO_MORE_DOCS) {
         state = DocsEnumState.FINISHED;
+        positionMax = 0;
       } else {
         state = DocsEnumState.ITERATING;
+        positionMax = super.freq();
       }
+      positionCount = 0;
       assert super.docID() == advanced;
       return doc = advanced;
     }
@@ -286,18 +294,78 @@
       assert state != DocsEnumState.START : "freq() called before nextDoc()/advance()";
       assert state != DocsEnumState.FINISHED : "freq() called after NO_MORE_DOCS";
       int freq = super.freq();
+      if (freq == 0) {
+        System.out.println();
+      }
       assert freq > 0;
       return freq;
     }
+
+    @Override
+    public int nextPosition() throws IOException {
+      assert state != DocsEnumState.START : "nextPosition() called before nextDoc()/advance()";
+      assert state != DocsEnumState.FINISHED : "nextPosition() called after NO_MORE_DOCS";
+      int position = super.nextPosition();
+      assert position >= 0 || position == -1 : "invalid position: " + position;
+      if (positionCount++ >= positionMax)
+        assert position == NO_MORE_POSITIONS : "nextPosition() does not return NO_MORE_POSITIONS when exhausted";
+      return position;
+    }
+
+    @Override
+    public int startOffset() throws IOException {
+      assert state != DocsEnumState.START : "startOffset() called before nextDoc()/advance()";
+      assert state != DocsEnumState.FINISHED : "startOffset() called after NO_MORE_DOCS";
+      assert positionCount > 0 : "startOffset() called before nextPosition()!";
+      assert positionCount <= positionMax : "startOffset() called after NO_MORE_POSITIONS";
+      return super.startOffset();
+    }
+
+    @Override
+    public int endOffset() throws IOException {
+      assert state != DocsEnumState.START : "endOffset() called before nextDoc()/advance()";
+      assert state != DocsEnumState.FINISHED : "endOffset() called after NO_MORE_DOCS";
+      assert positionCount > 0 : "endOffset() called before nextPosition()!";
+      assert positionCount <= positionMax : "endOffset() called after NO_MORE_POSITIONS";
+      return super.endOffset();
+    }
+
+    @Override
+    public int startPosition() throws IOException {
+      assert state != DocsEnumState.START : "startPosition() called before nextDoc()/advance()";
+      assert state != DocsEnumState.FINISHED : "startPosition() called after NO_MORE_DOCS";
+      assert positionCount > 0 : "startPosition() called before nextPosition()!";
+      assert positionCount <= positionMax : "startPosition() called after NO_MORE_POSITIONS";
+      return super.startPosition();
+    }
+
+    @Override
+    public int endPosition() throws IOException {
+      assert state != DocsEnumState.START : "endPosition() called before nextDoc()/advance()";
+      assert state != DocsEnumState.FINISHED : "endPosition() called after NO_MORE_DOCS";
+      assert positionCount > 0 : "endPosition() called before nextPosition()!";
+      assert positionCount <= positionMax : "endPosition() called after NO_MORE_POSITIONS";
+      return super.endPosition();
+    }
+
+    @Override
+    public BytesRef getPayload() throws IOException {
+      assert state != DocsEnumState.START : "getPayload() called before nextDoc()/advance()";
+      assert state != DocsEnumState.FINISHED : "getPayload() called after NO_MORE_DOCS";
+      assert positionCount > 0 : "getPayload() called before nextPosition()!";
+      BytesRef payload = super.getPayload();
+      assert payload == null || payload.length > 0 : "getPayload() returned payload with invalid length!";
+      return payload;
+    }
   }
   
-  static class AssertingDocsAndPositionsEnum extends FilterDocsAndPositionsEnum {
+  static class AssertingDocsAndPositionsEnum extends FilterDocsEnum {
     private DocsEnumState state = DocsEnumState.START;
     private int positionMax = 0;
     private int positionCount = 0;
     private int doc;
 
-    public AssertingDocsAndPositionsEnum(DocsAndPositionsEnum in) {
+    public AssertingDocsAndPositionsEnum(DocsEnum in) {
       super(in);
       int docid = in.docID();
       assert docid == -1 : "invalid initial doc id: " + docid;
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/BasePostingsFormatTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/index/BasePostingsFormatTestCase.java
index 88c668f..ffa507c 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/BasePostingsFormatTestCase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/BasePostingsFormatTestCase.java
@@ -17,24 +17,6 @@
  * limitations under the License.
  */
 
-import java.io.File;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.EnumSet;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.NoSuchElementException;
-import java.util.Random;
-import java.util.Set;
-import java.util.SortedMap;
-import java.util.TreeMap;
-import java.util.concurrent.atomic.AtomicLong;
-
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.codecs.FieldsConsumer;
@@ -60,6 +42,24 @@
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.NoSuchElementException;
+import java.util.Random;
+import java.util.Set;
+import java.util.SortedMap;
+import java.util.TreeMap;
+import java.util.concurrent.atomic.AtomicLong;
+
 /**
  * Abstract class to do basic tests for a postings format.
  * NOTE: This test focuses on the postings
@@ -119,7 +119,7 @@
 
   /** Given the same random seed this always enumerates the
    *  same random postings */
-  private static class SeedPostings extends DocsAndPositionsEnum {
+  private static class SeedPostings extends DocsEnum {
     // Used only to generate docIDs; this way if you pull w/
     // or w/o positions you get the same docID sequence:
     private final Random docRandom;
@@ -231,7 +231,9 @@
         posUpto = freq;
         return 0;
       }
-      assert posUpto < freq;
+      //assert posUpto < freq;
+      if (posUpto >= freq)
+        return NO_MORE_POSITIONS;
 
       if (posUpto == 0 && random.nextBoolean()) {
         // Sometimes index pos = 0
@@ -628,20 +630,8 @@
     }
 
     @Override
-    public final DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) throws IOException {
-      if (liveDocs != null) {
-        throw new IllegalArgumentException("liveDocs must be null");
-      }
-      if (maxAllowed.compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) < 0) {
-        return null;
-      }
-      if ((flags & DocsAndPositionsEnum.FLAG_OFFSETS) != 0 && maxAllowed.compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) < 0) {
-        return null;
-      }
-      if ((flags & DocsAndPositionsEnum.FLAG_PAYLOADS) != 0 && allowPayloads == false) {
-        return null;
-      }
-      return getSeedPostings(current.getKey().utf8ToString(), current.getValue(), false, maxAllowed, allowPayloads);
+    public final DocsEnum docsAndPositions(Bits liveDocs, DocsEnum reuse, int flags) throws IOException {
+      return docs(liveDocs, reuse, flags);
     }
   }
 
@@ -724,7 +714,6 @@
   private static class ThreadState {
     // Only used with REUSE option:
     public DocsEnum reuseDocsEnum;
-    public DocsAndPositionsEnum reuseDocsAndPositionsEnum;
   }
 
   private void verifyEnum(ThreadState threadState,
@@ -790,31 +779,29 @@
     DocsEnum prevDocsEnum = null;
 
     DocsEnum docsEnum;
-    DocsAndPositionsEnum docsAndPositionsEnum;
 
     if (!doCheckPositions) {
       if (allowPositions && random().nextInt(10) == 7) {
         // 10% of the time, even though we will not check positions, pull a DocsAndPositions enum
         
         if (options.contains(Option.REUSE_ENUMS) && random().nextInt(10) < 9) {
-          prevDocsEnum = threadState.reuseDocsAndPositionsEnum;
+          prevDocsEnum = threadState.reuseDocsEnum;
         }
 
-        int flags = 0;
+        int flags = DocsEnum.FLAG_NONE;
         if (alwaysTestMax || random().nextBoolean()) {
-          flags |= DocsAndPositionsEnum.FLAG_OFFSETS;
+          flags |= DocsEnum.FLAG_OFFSETS;
         }
         if (alwaysTestMax || random().nextBoolean()) {
-          flags |= DocsAndPositionsEnum.FLAG_PAYLOADS;
+          flags |= DocsEnum.FLAG_PAYLOADS;
         }
 
         if (VERBOSE) {
-          System.out.println("  get DocsAndPositionsEnum (but we won't check positions) flags=" + flags);
+          System.out.println("  get DocsEnum (but we won't check positions) flags=" + flags);
         }
 
-        threadState.reuseDocsAndPositionsEnum = termsEnum.docsAndPositions(liveDocs, (DocsAndPositionsEnum) prevDocsEnum, flags);
-        docsEnum = threadState.reuseDocsAndPositionsEnum;
-        docsAndPositionsEnum = threadState.reuseDocsAndPositionsEnum;
+        threadState.reuseDocsEnum = termsEnum.docsAndPositions(liveDocs, prevDocsEnum, flags);
+        docsEnum = threadState.reuseDocsEnum;
       } else {
         if (VERBOSE) {
           System.out.println("  get DocsEnum");
@@ -824,28 +811,26 @@
         }
         threadState.reuseDocsEnum = termsEnum.docs(liveDocs, prevDocsEnum, doCheckFreqs ? DocsEnum.FLAG_FREQS : DocsEnum.FLAG_NONE);
         docsEnum = threadState.reuseDocsEnum;
-        docsAndPositionsEnum = null;
       }
     } else {
       if (options.contains(Option.REUSE_ENUMS) && random().nextInt(10) < 9) {
-        prevDocsEnum = threadState.reuseDocsAndPositionsEnum;
+        prevDocsEnum = threadState.reuseDocsEnum;
       }
 
-      int flags = 0;
+      int flags = DocsEnum.FLAG_NONE;
       if (alwaysTestMax || doCheckOffsets || random().nextInt(3) == 1) {
-        flags |= DocsAndPositionsEnum.FLAG_OFFSETS;
+        flags |= DocsEnum.FLAG_OFFSETS;
       }
       if (alwaysTestMax || doCheckPayloads|| random().nextInt(3) == 1) {
-        flags |= DocsAndPositionsEnum.FLAG_PAYLOADS;
+        flags |= DocsEnum.FLAG_PAYLOADS;
       }
 
       if (VERBOSE) {
-        System.out.println("  get DocsAndPositionsEnum flags=" + flags);
+        System.out.println("  get DocsEnum flags=" + flags);
       }
 
-      threadState.reuseDocsAndPositionsEnum = termsEnum.docsAndPositions(liveDocs, (DocsAndPositionsEnum) prevDocsEnum, flags);
-      docsEnum = threadState.reuseDocsAndPositionsEnum;
-      docsAndPositionsEnum = threadState.reuseDocsAndPositionsEnum;
+      threadState.reuseDocsEnum = termsEnum.docsAndPositions(liveDocs, prevDocsEnum, flags);
+      docsEnum = threadState.reuseDocsEnum;
     }
 
     assertNotNull("null DocsEnum", docsEnum);
@@ -987,7 +972,7 @@
           if (VERBOSE) {
             System.out.println("    now nextPosition to " + pos);
           }
-          assertEquals("position is wrong", pos, docsAndPositionsEnum.nextPosition());
+          assertEquals("position is wrong", pos, docsEnum.nextPosition());
 
           if (doCheckPayloads) {
             BytesRef expectedPayload = expected.getPayload();
@@ -996,9 +981,9 @@
                 System.out.println("      now check expectedPayload length=" + (expectedPayload == null ? 0 : expectedPayload.length));
               }
               if (expectedPayload == null || expectedPayload.length == 0) {
-                assertNull("should not have payload", docsAndPositionsEnum.getPayload());
+                assertNull("should not have payload", docsEnum.getPayload());
               } else {
-                BytesRef payload = docsAndPositionsEnum.getPayload();
+                BytesRef payload = docsEnum.getPayload();
                 assertNotNull("should have payload but doesn't", payload);
 
                 assertEquals("payload length is wrong", expectedPayload.length, payload.length);
@@ -1010,7 +995,7 @@
                 
                 // make a deep copy
                 payload = BytesRef.deepCopyOf(payload);
-                assertEquals("2nd call to getPayload returns something different!", payload, docsAndPositionsEnum.getPayload());
+                assertEquals("2nd call to getPayload returns something different!", payload, docsEnum.getPayload());
               }
             } else {
               if (VERBOSE) {
@@ -1024,8 +1009,8 @@
               if (VERBOSE) {
                 System.out.println("      now check offsets: startOff=" + expected.startOffset() + " endOffset=" + expected.endOffset());
               }
-              assertEquals("startOffset is wrong", expected.startOffset(), docsAndPositionsEnum.startOffset());
-              assertEquals("endOffset is wrong", expected.endOffset(), docsAndPositionsEnum.endOffset());
+              assertEquals("startOffset is wrong", expected.startOffset(), docsEnum.startOffset());
+              assertEquals("endOffset is wrong", expected.endOffset(), docsEnum.endOffset());
             } else {
               if (VERBOSE) {
                 System.out.println("      skip check offsets");
@@ -1035,8 +1020,8 @@
             if (VERBOSE) {
               System.out.println("      now check offsets are -1");
             }
-            assertEquals("startOffset isn't -1", -1, docsAndPositionsEnum.startOffset());
-            assertEquals("endOffset isn't -1", -1, docsAndPositionsEnum.endOffset());
+            assertEquals("startOffset isn't -1", -1, docsEnum.startOffset());
+            assertEquals("endOffset isn't -1", -1, docsEnum.endOffset());
           }
         }
       }
@@ -1434,8 +1419,6 @@
                       BytesRef term = termsEnum.term();
                       if (random().nextBoolean()) {
                         docs = termsEnum.docs(null, docs, DocsEnum.FLAG_FREQS);
-                      } else if (docs instanceof DocsAndPositionsEnum) {
-                        docs = termsEnum.docsAndPositions(null, (DocsAndPositionsEnum) docs, 0);
                       } else {
                         docs = termsEnum.docsAndPositions(null, null, 0);
                       }
@@ -1444,12 +1427,9 @@
                       while (docs.nextDoc() != DocsEnum.NO_MORE_DOCS) {
                         docFreq++;
                         totalTermFreq += docs.freq();
-                        if (docs instanceof DocsAndPositionsEnum) {
-                          DocsAndPositionsEnum posEnum = (DocsAndPositionsEnum) docs;
-                          int limit = _TestUtil.nextInt(random(), 1, docs.freq());
-                          for(int i=0;i<limit;i++) {
-                            posEnum.nextPosition();
-                          }
+                        int limit = _TestUtil.nextInt(random(), 1, docs.freq());
+                        for(int i=0;i<limit;i++) {
+                          docs.nextPosition();
                         }
                       }
 
@@ -1479,8 +1459,6 @@
                       if (termsEnum.seekExact(new BytesRef(term))) {
                         if (random().nextBoolean()) {
                           docs = termsEnum.docs(null, docs, DocsEnum.FLAG_FREQS);
-                        } else if (docs instanceof DocsAndPositionsEnum) {
-                          docs = termsEnum.docsAndPositions(null, (DocsAndPositionsEnum) docs, 0);
                         } else {
                           docs = termsEnum.docsAndPositions(null, null, 0);
                         }
@@ -1490,12 +1468,9 @@
                         while (docs.nextDoc() != DocsEnum.NO_MORE_DOCS) {
                           docFreq++;
                           totalTermFreq += docs.freq();
-                          if (docs instanceof DocsAndPositionsEnum) {
-                            DocsAndPositionsEnum posEnum = (DocsAndPositionsEnum) docs;
-                            int limit = _TestUtil.nextInt(random(), 1, docs.freq());
-                            for(int i=0;i<limit;i++) {
-                              posEnum.nextPosition();
-                            }
+                          int limit = _TestUtil.nextInt(random(), 1, docs.freq());
+                          for(int i=0;i<limit;i++) {
+                            docs.nextPosition();
                           }
                         }
 
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/BaseTermVectorsFormatTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/index/BaseTermVectorsFormatTestCase.java
index d170a0e..7cf18e9 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/BaseTermVectorsFormatTestCase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/BaseTermVectorsFormatTestCase.java
@@ -391,7 +391,6 @@
   // to test reuse
   private final ThreadLocal<TermsEnum> termsEnum = new ThreadLocal<TermsEnum>();
   private final ThreadLocal<DocsEnum> docsEnum = new ThreadLocal<DocsEnum>();
-  private final ThreadLocal<DocsAndPositionsEnum> docsAndPositionsEnum = new ThreadLocal<DocsAndPositionsEnum>();
 
   protected void assertEquals(RandomTokenStream tk, FieldType ft, Terms terms) throws IOException {
     assertEquals(1, terms.getDocCount());
@@ -429,7 +428,7 @@
       this.docsEnum.set(docsEnum);
 
       bits.clear(0);
-      DocsAndPositionsEnum docsAndPositionsEnum = termsEnum.docsAndPositions(bits, random().nextBoolean() ? null : this.docsAndPositionsEnum.get());
+      DocsEnum docsAndPositionsEnum = termsEnum.docsAndPositions(bits, random().nextBoolean() ? null : this.docsEnum.get());
       assertEquals(ft.storeTermVectorOffsets() || ft.storeTermVectorPositions(), docsAndPositionsEnum != null);
       if (docsAndPositionsEnum != null) {
         assertEquals(DocsEnum.NO_MORE_DOCS, docsAndPositionsEnum.nextDoc());
@@ -495,7 +494,7 @@
         }
         assertEquals(DocsEnum.NO_MORE_DOCS, docsAndPositionsEnum.nextDoc());
       }
-      this.docsAndPositionsEnum.set(docsAndPositionsEnum);
+      this.docsEnum.set(docsAndPositionsEnum);
     }
     assertNull(termsEnum.next());
     for (int i = 0; i < 5; ++i) {
diff --git a/lucene/test-framework/src/java/org/apache/lucene/search/AssertingIndexSearcher.java b/lucene/test-framework/src/java/org/apache/lucene/search/AssertingIndexSearcher.java
index a5c30a7..070db6b 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/search/AssertingIndexSearcher.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/search/AssertingIndexSearcher.java
@@ -17,16 +17,17 @@
  * limitations under the License.
  */
 
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexReaderContext;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util._TestUtil;
+
 import java.io.IOException;
 import java.util.List;
 import java.util.Random;
 import java.util.concurrent.ExecutorService;
 
-import org.apache.lucene.index.AtomicReaderContext;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexReaderContext;
-import org.apache.lucene.util._TestUtil;
-
 /**
  * Helper class that adds some extra checks to ensure correct
  * usage of {@code IndexSearcher} and {@code Weight}.
@@ -65,6 +66,23 @@
       }
 
       @Override
+      public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder,
+          boolean topScorer, PostingFeatures flags, Bits acceptDocs) throws IOException {
+        Scorer scorer = w.scorer(context, scoreDocsInOrder, topScorer, flags, acceptDocs);
+        if (scorer != null) {
+          // check that scorer obeys disi contract for docID() before next()/advance
+          try {
+            int docid = scorer.docID();
+            assert docid == -1 || docid == DocIdSetIterator.NO_MORE_DOCS;
+          } catch (UnsupportedOperationException ignored) {
+            // from a top-level BS1
+            assert topScorer;
+          }
+        }
+        return scorer;
+      }
+
+      @Override
       public float getValueForNormalization() {
         throw new IllegalStateException("Weight already normalized.");
       }
diff --git a/lucene/test-framework/src/java/org/apache/lucene/search/AssertingWeight.java b/lucene/test-framework/src/java/org/apache/lucene/search/AssertingWeight.java
index 8d4ab11..1261a1a 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/search/AssertingWeight.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/search/AssertingWeight.java
@@ -17,12 +17,12 @@
  * limitations under the License.
  */
 
-import java.io.IOException;
-import java.util.Random;
-
 import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.util.Bits;
 
+import java.io.IOException;
+import java.util.Random;
+
 class AssertingWeight extends Weight {
 
   static Weight wrap(Random random, Weight other) {
@@ -59,11 +59,11 @@
 
   @Override
   public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder,
-                       boolean topScorer, Bits acceptDocs) throws IOException {
+                       boolean topScorer, PostingFeatures flags, Bits acceptDocs) throws IOException {
     // if the caller asks for in-order scoring or if the weight does not support
     // out-of order scoring then collection will have to happen in-order.
     final boolean inOrder = scoreDocsInOrder || !scoresDocsOutOfOrder();
-    final Scorer inScorer = in.scorer(context, scoreDocsInOrder, topScorer, acceptDocs);
+    final Scorer inScorer = in.scorer(context, scoreDocsInOrder, topScorer, flags, acceptDocs);
     return AssertingScorer.wrap(new Random(random.nextLong()), inScorer, topScorer, inOrder);
   }
 
diff --git a/lucene/test-framework/src/java/org/apache/lucene/search/QueryUtils.java b/lucene/test-framework/src/java/org/apache/lucene/search/QueryUtils.java
index 621baca..086f3e1 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/search/QueryUtils.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/search/QueryUtils.java
@@ -34,6 +34,7 @@
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.MultiReader;
 import org.apache.lucene.index.SlowCompositeReaderWrapper;
+import org.apache.lucene.search.Weight.PostingFeatures;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.MockDirectoryWrapper;
 import org.apache.lucene.store.RAMDirectory;
@@ -267,7 +268,7 @@
               if (scorer == null) {
                 Weight w = s.createNormalizedWeight(q);
                 AtomicReaderContext context = readerContextArray.get(leafPtr);
-                scorer = w.scorer(context, true, false, context.reader().getLiveDocs());
+                scorer = w.scorer(context, true, false, PostingFeatures.DOCS_AND_FREQS, context.reader().getLiveDocs());
               }
               
               int op = order[(opidx[0]++) % order.length];
@@ -314,7 +315,7 @@
               indexSearcher.setSimilarity(s.getSimilarity());
               Weight w = indexSearcher.createNormalizedWeight(q);
               AtomicReaderContext ctx = (AtomicReaderContext)indexSearcher.getTopReaderContext();
-              Scorer scorer = w.scorer(ctx, true, false, ctx.reader().getLiveDocs());
+              Scorer scorer = w.scorer(ctx, true, false, PostingFeatures.DOCS_AND_FREQS, ctx.reader().getLiveDocs());
               if (scorer != null) {
                 boolean more = scorer.advance(lastDoc[0] + 1) != DocIdSetIterator.NO_MORE_DOCS;
                 Assert.assertFalse("query's last doc was "+ lastDoc[0] +" but skipTo("+(lastDoc[0]+1)+") got to "+scorer.docID(),more);
@@ -341,7 +342,7 @@
           indexSearcher.setSimilarity(s.getSimilarity());
           Weight w = indexSearcher.createNormalizedWeight(q);
           AtomicReaderContext ctx = previousReader.getContext();
-          Scorer scorer = w.scorer(ctx, true, false, ctx.reader().getLiveDocs());
+          Scorer scorer = w.scorer(ctx, true, false, PostingFeatures.DOCS_AND_FREQS, ctx.reader().getLiveDocs());
           if (scorer != null) {
             boolean more = scorer.advance(lastDoc[0] + 1) != DocIdSetIterator.NO_MORE_DOCS;
             Assert.assertFalse("query's last doc was "+ lastDoc[0] +" but skipTo("+(lastDoc[0]+1)+") got to "+scorer.docID(),more);
@@ -372,7 +373,7 @@
           long startMS = System.currentTimeMillis();
           for (int i=lastDoc[0]+1; i<=doc; i++) {
             Weight w = s.createNormalizedWeight(q);
-            Scorer scorer = w.scorer(context.get(leafPtr), true, false, liveDocs);
+            Scorer scorer = w.scorer(context.get(leafPtr), true, false, PostingFeatures.DOCS_AND_FREQS, liveDocs);
             Assert.assertTrue("query collected "+doc+" but skipTo("+i+") says no more docs!",scorer.advance(i) != DocIdSetIterator.NO_MORE_DOCS);
             Assert.assertEquals("query collected "+doc+" but skipTo("+i+") got to "+scorer.docID(),doc,scorer.docID());
             float skipToScore = scorer.score();
@@ -400,7 +401,7 @@
           IndexSearcher indexSearcher = LuceneTestCase.newSearcher(previousReader);
           indexSearcher.setSimilarity(s.getSimilarity());
           Weight w = indexSearcher.createNormalizedWeight(q);
-          Scorer scorer = w.scorer((AtomicReaderContext)indexSearcher.getTopReaderContext(), true, false, previousReader.getLiveDocs());
+          Scorer scorer = w.scorer((AtomicReaderContext)indexSearcher.getTopReaderContext(), true, false, PostingFeatures.DOCS_AND_FREQS, previousReader.getLiveDocs());
           if (scorer != null) {
             boolean more = scorer.advance(lastDoc[0] + 1) != DocIdSetIterator.NO_MORE_DOCS;
             Assert.assertFalse("query's last doc was "+ lastDoc[0] +" but skipTo("+(lastDoc[0]+1)+") got to "+scorer.docID(),more);
@@ -425,7 +426,7 @@
       IndexSearcher indexSearcher = LuceneTestCase.newSearcher(previousReader);
       indexSearcher.setSimilarity(s.getSimilarity());
       Weight w = indexSearcher.createNormalizedWeight(q);
-      Scorer scorer = w.scorer((AtomicReaderContext)indexSearcher.getTopReaderContext(), true, false, previousReader.getLiveDocs());
+      Scorer scorer = w.scorer((AtomicReaderContext)indexSearcher.getTopReaderContext(), true, false, PostingFeatures.DOCS_AND_FREQS, previousReader.getLiveDocs());
       if (scorer != null) {
         boolean more = scorer.advance(lastDoc[0] + 1) != DocIdSetIterator.NO_MORE_DOCS;
         Assert.assertFalse("query's last doc was "+ lastDoc[0] +" but skipTo("+(lastDoc[0]+1)+") got to "+scorer.docID(),more);
diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java
index eb12f54..59a4909 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java
@@ -17,51 +17,107 @@
  * limitations under the License.
  */
 
-import java.io.*;
-import java.lang.annotation.*;
-import java.lang.reflect.Constructor;
-import java.lang.reflect.InvocationTargetException;
-import java.lang.reflect.Method;
-import java.util.*;
-import java.util.concurrent.*;
-import java.util.concurrent.atomic.AtomicReference;
-import java.util.logging.Logger;
-
+import com.carrotsearch.randomizedtesting.JUnit4MethodProvider;
+import com.carrotsearch.randomizedtesting.LifecycleScope;
+import com.carrotsearch.randomizedtesting.MixWithSuiteName;
+import com.carrotsearch.randomizedtesting.RandomizedContext;
+import com.carrotsearch.randomizedtesting.RandomizedRunner;
+import com.carrotsearch.randomizedtesting.RandomizedTest;
+import com.carrotsearch.randomizedtesting.annotations.Listeners;
+import com.carrotsearch.randomizedtesting.annotations.SeedDecorators;
+import com.carrotsearch.randomizedtesting.annotations.TestGroup;
+import com.carrotsearch.randomizedtesting.annotations.TestMethodProviders;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakAction;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakAction.Action;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakGroup;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakGroup.Group;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakLingering;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope.Scope;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakZombies;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakZombies.Consequence;
+import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite;
+import com.carrotsearch.randomizedtesting.generators.RandomPicks;
+import com.carrotsearch.randomizedtesting.rules.NoClassHooksShadowingRule;
+import com.carrotsearch.randomizedtesting.rules.NoInstanceHooksOverridesRule;
+import com.carrotsearch.randomizedtesting.rules.StaticFieldsInvariantRule;
+import com.carrotsearch.randomizedtesting.rules.SystemPropertiesInvariantRule;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.codecs.Codec;
-import org.apache.lucene.document.Field.Store;
 import org.apache.lucene.document.Field;
+import org.apache.lucene.document.Field.Store;
 import org.apache.lucene.document.FieldType;
 import org.apache.lucene.document.StringField;
 import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.*;
 import org.apache.lucene.index.IndexReader.ReaderClosedListener;
 import org.apache.lucene.index.TermsEnum.SeekStatus;
-import org.apache.lucene.search.*;
+import org.apache.lucene.search.AssertingIndexSearcher;
+import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.search.FieldCache.CacheEntry;
+import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.QueryUtils.FCInvisibleMultiReader;
-import org.apache.lucene.store.*;
+import org.apache.lucene.store.BaseDirectoryWrapper;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.FSDirectory;
+import org.apache.lucene.store.FlushInfo;
+import org.apache.lucene.store.IOContext;
 import org.apache.lucene.store.IOContext.Context;
+import org.apache.lucene.store.LockFactory;
+import org.apache.lucene.store.MergeInfo;
+import org.apache.lucene.store.MockDirectoryWrapper;
 import org.apache.lucene.store.MockDirectoryWrapper.Throttling;
+import org.apache.lucene.store.NRTCachingDirectory;
+import org.apache.lucene.store.RateLimitedDirectoryWrapper;
 import org.apache.lucene.util.FieldCacheSanityChecker.Insanity;
 import org.apache.lucene.util.automaton.AutomatonTestUtil;
 import org.apache.lucene.util.automaton.CompiledAutomaton;
 import org.apache.lucene.util.automaton.RegExp;
-import org.junit.*;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
 import org.junit.rules.RuleChain;
 import org.junit.rules.TestRule;
 import org.junit.runner.RunWith;
-import com.carrotsearch.randomizedtesting.*;
-import com.carrotsearch.randomizedtesting.annotations.*;
-import com.carrotsearch.randomizedtesting.annotations.ThreadLeakAction.Action;
-import com.carrotsearch.randomizedtesting.annotations.ThreadLeakGroup.Group;
-import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope.Scope;
-import com.carrotsearch.randomizedtesting.annotations.ThreadLeakZombies.Consequence;
-import com.carrotsearch.randomizedtesting.generators.RandomPicks;
-import com.carrotsearch.randomizedtesting.rules.NoClassHooksShadowingRule;
-import com.carrotsearch.randomizedtesting.rules.NoInstanceHooksOverridesRule;
-import com.carrotsearch.randomizedtesting.rules.StaticFieldsInvariantRule;
-import com.carrotsearch.randomizedtesting.rules.SystemPropertiesInvariantRule;
+
+import java.io.Closeable;
+import java.io.File;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.lang.annotation.Documented;
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Inherited;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+import java.lang.reflect.Constructor;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.EnumSet;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Locale;
+import java.util.Random;
+import java.util.Set;
+import java.util.TimeZone;
+import java.util.TreeSet;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.logging.Logger;
 
 import static com.carrotsearch.randomizedtesting.RandomizedTest.systemPropertyAsBoolean;
 import static com.carrotsearch.randomizedtesting.RandomizedTest.systemPropertyAsInt;
@@ -1530,8 +1586,8 @@
   public void assertTermsEnumEquals(String info, IndexReader leftReader, TermsEnum leftTermsEnum, TermsEnum rightTermsEnum, boolean deep) throws IOException {
     BytesRef term;
     Bits randomBits = new RandomBits(leftReader.maxDoc(), random().nextDouble(), random());
-    DocsAndPositionsEnum leftPositions = null;
-    DocsAndPositionsEnum rightPositions = null;
+    DocsEnum leftPositions = null;
+    DocsEnum rightPositions = null;
     DocsEnum leftDocs = null;
     DocsEnum rightDocs = null;
     
@@ -1595,7 +1651,7 @@
   /**
    * checks docs + freqs + positions + payloads, sequentially
    */
-  public void assertDocsAndPositionsEnumEquals(String info, DocsAndPositionsEnum leftDocs, DocsAndPositionsEnum rightDocs) throws IOException {
+  public void assertDocsAndPositionsEnumEquals(String info, DocsEnum leftDocs, DocsEnum rightDocs) throws IOException {
     if (leftDocs == null || rightDocs == null) {
       assertNull(leftDocs);
       assertNull(rightDocs);
@@ -1674,7 +1730,7 @@
   /**
    * checks advancing docs + positions
    */
-  public void assertPositionsSkippingEquals(String info, IndexReader leftReader, int docFreq, DocsAndPositionsEnum leftDocs, DocsAndPositionsEnum rightDocs) throws IOException {
+  public void assertPositionsSkippingEquals(String info, IndexReader leftReader, int docFreq, DocsEnum leftDocs, DocsEnum rightDocs) throws IOException {
     if (leftDocs == null || rightDocs == null) {
       assertNull(leftDocs);
       assertNull(rightDocs);
diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/_TestUtil.java b/lucene/test-framework/src/java/org/apache/lucene/util/_TestUtil.java
index 7ff158b..7bd13a6 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/util/_TestUtil.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/util/_TestUtil.java
@@ -66,7 +66,6 @@
 import org.apache.lucene.index.CheckIndex.Status.TermIndexStatus;
 import org.apache.lucene.index.CheckIndex.Status.TermVectorStatus;
 import org.apache.lucene.index.ConcurrentMergeScheduler;
-import org.apache.lucene.index.DocsAndPositionsEnum;
 import org.apache.lucene.index.DocsEnum;
 import org.apache.lucene.index.FieldInfo.DocValuesType;
 import org.apache.lucene.index.FieldInfos;
@@ -948,13 +947,13 @@
       if (random.nextBoolean()) {
         final int posFlags;
         switch (random.nextInt(4)) {
-          case 0: posFlags = 0; break;
-          case 1: posFlags = DocsAndPositionsEnum.FLAG_OFFSETS; break;
-          case 2: posFlags = DocsAndPositionsEnum.FLAG_PAYLOADS; break;
-          default: posFlags = DocsAndPositionsEnum.FLAG_OFFSETS | DocsAndPositionsEnum.FLAG_PAYLOADS; break;
+          case 0: posFlags = DocsEnum.FLAG_NONE; break;
+          case 1: posFlags = DocsEnum.FLAG_OFFSETS; break;
+          case 2: posFlags = DocsEnum.FLAG_PAYLOADS; break;
+          default: posFlags = DocsEnum.FLAG_OFFSETS | DocsEnum.FLAG_PAYLOADS; break;
         }
         // TODO: cast to DocsAndPositionsEnum?
-        DocsAndPositionsEnum docsAndPositions = termsEnum.docsAndPositions(liveDocs, null, posFlags);
+        DocsEnum docsAndPositions = termsEnum.docsAndPositions(liveDocs, null, posFlags);
         if (docsAndPositions != null) {
           return docsAndPositions;
         }
diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DIHPropertiesWriter.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DIHPropertiesWriter.java
new file mode 100644
index 0000000..5bdfc2e
--- /dev/null
+++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DIHPropertiesWriter.java
@@ -0,0 +1,32 @@
+package org.apache.solr.handler.dataimport;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Properties;
+
+public interface DIHPropertiesWriter {
+  
+  public void init(DataImporter dataImporter);
+  
+  public boolean isWritable();
+  
+  public void persist(Properties props);
+  
+  public Properties readIndexerProperties();
+  
+}
diff --git a/solr/contrib/dataimporthandler/src/test-files/dih/solr/collection1/conf/dataimport-solrconfig-end-to-end.xml b/solr/contrib/dataimporthandler/src/test-files/dih/solr/collection1/conf/dataimport-solrconfig-end-to-end.xml
new file mode 100644
index 0000000..0532e0a
--- /dev/null
+++ b/solr/contrib/dataimporthandler/src/test-files/dih/solr/collection1/conf/dataimport-solrconfig-end-to-end.xml
@@ -0,0 +1,41 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<config>
+  <luceneMatchVersion>${tests.luceneMatchVersion:LUCENE_CURRENT}</luceneMatchVersion>
+  <directoryFactory name="DirectoryFactory" class="${solr.directoryFactory:solr.RAMDirectoryFactory}"/>
+  <updateHandler class="solr.DirectUpdateHandler2">
+    <maxPendingDeletes>100000</maxPendingDeletes>
+  </updateHandler>  
+  <requestDispatcher handleSelect="true" >
+    <requestParsers enableRemoteStreaming="false" multipartUploadLimitInKB="2048" />
+    <httpCaching never304="true" />    
+  </requestDispatcher>
+  <requestHandler name="standard" class="solr.StandardRequestHandler" default="true">
+    <lst name="defaults">
+       <str name="echoParams">explicit</str>       
+    </lst>
+  </requestHandler>  
+  <requestHandler name="/dataimport-end-to-end" class="org.apache.solr.handler.dataimport.DataImportHandler" />  
+  <requestHandler name="/search" class="org.apache.solr.handler.component.SearchHandler">
+    <lst name="defaults">
+      <str name="echoParams">explicit</str>
+    </lst> 
+  </requestHandler>  
+</config>
+
diff --git a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestSqlEntityProcessorDelta2.java b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestSqlEntityProcessorDelta2.java
new file mode 100644
index 0000000..5f4595a
--- /dev/null
+++ b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestSqlEntityProcessorDelta2.java
@@ -0,0 +1,288 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.handler.dataimport;
+
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Ignore;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+/**
+ * <p>
+ * Test for SqlEntityProcessor which checks variations in primary key names and deleted ids
+ * </p>
+ * 
+ *
+ *
+ * @since solr 1.3
+ */
+@Ignore("FIXME: I fail so often it makes me ill!")
+public class TestSqlEntityProcessorDelta2 extends AbstractDataImportHandlerTestCase {
+  private static final String FULLIMPORT_QUERY = "select * from x";
+
+  private static final String DELTA_QUERY = "select id from x where last_modified > NOW";
+
+  private static final String DELETED_PK_QUERY = "select id from x where last_modified > NOW AND deleted='true'";
+
+  private static final String dataConfig_delta2 =
+    "<dataConfig>" +
+    "  <dataSource  type=\"MockDataSource\"/>\n" +
+    "  <document>\n" +
+    "    <entity name=\"x\" transformer=\"TemplateTransformer\"" +
+    "            query=\"" + FULLIMPORT_QUERY + "\"" +
+    "            deletedPkQuery=\"" + DELETED_PK_QUERY + "\"" +
+    "            deltaImportQuery=\"select * from x where id='${dih.delta.id}'\"" +
+    "            deltaQuery=\"" + DELTA_QUERY + "\">\n" +
+    "      <field column=\"tmpid\" template=\"prefix-${x.id}\" name=\"solr_id\"/>\n" +
+    "      <entity name=\"y\" query=\"select * from y where y.A='${x.id}'\">\n" +
+    "        <field column=\"desc\" />\n" +
+    "      </entity>\n" +
+    "    </entity>\n" +
+    "  </document>\n" +
+    "</dataConfig>\n";
+
+  @BeforeClass
+  public static void beforeClass() throws Exception {
+    initCore("dataimport-solrconfig.xml", "dataimport-solr_id-schema.xml");
+  }
+  
+  @Before @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    clearIndex();
+    assertU(commit());
+  }
+
+  @SuppressWarnings("unchecked")
+  private void add1document() throws Exception {
+    List parentRow = new ArrayList();
+    parentRow.add(createMap("id", "1"));
+    MockDataSource.setIterator(FULLIMPORT_QUERY, parentRow.iterator());
+
+    List childRow = new ArrayList();
+    childRow.add(createMap("desc", "hello"));
+    MockDataSource.setIterator("select * from y where y.A='1'", childRow
+        .iterator());
+
+    runFullImport(dataConfig_delta2);
+
+    assertQ(req("*:* OR add1document"), "//*[@numFound='1']");
+    assertQ(req("solr_id:prefix-1"), "//*[@numFound='1']");
+    assertQ(req("desc:hello"), "//*[@numFound='1']");
+  }
+
+  @Test
+  @SuppressWarnings("unchecked")
+  public void testCompositePk_FullImport() throws Exception {
+    add1document();
+  }
+
+  @Test
+  @SuppressWarnings("unchecked")
+  public void testCompositePk_DeltaImport_delete() throws Exception {
+    add1document();
+    List deletedRow = new ArrayList();
+    deletedRow.add(createMap("id", "1"));
+    MockDataSource.setIterator(DELETED_PK_QUERY, deletedRow.iterator());
+
+    MockDataSource.setIterator(DELTA_QUERY, Collections
+        .EMPTY_LIST.iterator());
+
+    List childRow = new ArrayList();
+    childRow.add(createMap("desc", "hello"));
+    MockDataSource.setIterator("select * from y where y.A='1'", childRow
+        .iterator());
+
+    runDeltaImport(dataConfig_delta2);
+    assertQ(req("*:* OR testCompositePk_DeltaImport_delete"), "//*[@numFound='0']");
+  }
+
+  @Test
+  @SuppressWarnings("unchecked")
+  public void testCompositePk_DeltaImport_empty() throws Exception {
+    List deltaRow = new ArrayList();
+    deltaRow.add(createMap("id", "1"));
+    MockDataSource.setIterator(DELTA_QUERY, deltaRow.iterator());
+
+    MockDataSource.setIterator(DELETED_PK_QUERY, Collections
+        .EMPTY_LIST.iterator());
+
+    List parentRow = new ArrayList();
+    parentRow.add(createMap("id", "1"));
+    MockDataSource.setIterator("select * from x where id='1'", parentRow
+        .iterator());
+
+    List childRow = new ArrayList();
+    childRow.add(createMap("desc", "hello"));
+    MockDataSource.setIterator("select * from y where y.A='1'", childRow
+        .iterator());
+
+    runDeltaImport(dataConfig_delta2);
+
+    assertQ(req("*:* OR testCompositePk_DeltaImport_empty"), "//*[@numFound='1']");
+    assertQ(req("solr_id:prefix-1"), "//*[@numFound='1']");
+    assertQ(req("desc:hello"), "//*[@numFound='1']");
+  }
+
+  @Test
+  @SuppressWarnings("unchecked")
+  public void testCompositePk_DeltaImport_replace_delete() throws Exception {
+    add1document();
+    MockDataSource.clearCache();
+
+    List deltaRow = new ArrayList();
+    deltaRow.add(createMap("id", "1"));
+    MockDataSource.setIterator(DELTA_QUERY,
+        deltaRow.iterator());
+
+    List deletedRow = new ArrayList();
+    deletedRow.add(createMap("id", "1"));
+    MockDataSource.setIterator(DELETED_PK_QUERY,
+        deletedRow.iterator());
+
+    List parentRow = new ArrayList();
+    parentRow.add(createMap("id", "1"));
+    MockDataSource.setIterator("select * from x where id='1'", parentRow
+        .iterator());
+
+    List childRow = new ArrayList();
+    childRow.add(createMap("desc", "goodbye"));
+    MockDataSource.setIterator("select * from y where y.A='1'", childRow
+        .iterator());
+
+    runDeltaImport(dataConfig_delta2);
+
+    assertQ(req("*:* OR testCompositePk_DeltaImport_replace_delete"), "//*[@numFound='0']");
+  }
+
+
+  @Test
+  @SuppressWarnings("unchecked")
+  public void testCompositePk_DeltaImport_replace_nodelete() throws Exception {
+    add1document();
+    MockDataSource.clearCache();
+
+    List deltaRow = new ArrayList();
+    deltaRow.add(createMap("id", "1"));
+    MockDataSource.setIterator(DELTA_QUERY,
+        deltaRow.iterator());
+
+    MockDataSource.setIterator(DELETED_PK_QUERY, Collections
+        .EMPTY_LIST.iterator());
+
+    List parentRow = new ArrayList();
+    parentRow.add(createMap("id", "1"));
+    MockDataSource.setIterator("select * from x where id='1'", parentRow
+        .iterator());
+
+    List childRow = new ArrayList();
+    childRow.add(createMap("desc", "goodbye"));
+    MockDataSource.setIterator("select * from y where y.A='1'", childRow
+        .iterator());
+
+    runDeltaImport(dataConfig_delta2);
+
+    assertQ(req("*:* OR XtestCompositePk_DeltaImport_replace_nodelete"), "//*[@numFound='1']");
+    assertQ(req("solr_id:prefix-1"), "//*[@numFound='1']");
+    assertQ(req("desc:hello OR XtestCompositePk_DeltaImport_replace_nodelete"), "//*[@numFound='0']");
+    assertQ(req("desc:goodbye"), "//*[@numFound='1']");
+  }
+
+  @Test
+  @SuppressWarnings("unchecked")
+  public void testCompositePk_DeltaImport_add() throws Exception {
+    add1document();
+    MockDataSource.clearCache();
+
+    List deltaRow = new ArrayList();
+    deltaRow.add(createMap("id", "2"));
+    MockDataSource.setIterator(DELTA_QUERY,
+        deltaRow.iterator());
+
+    List parentRow = new ArrayList();
+    parentRow.add(createMap("id", "2"));
+    MockDataSource.setIterator("select * from x where id='2'", parentRow
+        .iterator());
+
+    List childRow = new ArrayList();
+    childRow.add(createMap("desc", "goodbye"));
+    MockDataSource.setIterator("select * from y where y.A='2'", childRow
+        .iterator());
+
+    runDeltaImport(dataConfig_delta2);
+
+    assertQ(req("*:* OR testCompositePk_DeltaImport_add"), "//*[@numFound='2']");
+    assertQ(req("solr_id:prefix-1"), "//*[@numFound='1']");
+    assertQ(req("solr_id:prefix-2"), "//*[@numFound='1']");
+    assertQ(req("desc:hello"), "//*[@numFound='1']");
+    assertQ(req("desc:goodbye"), "//*[@numFound='1']");
+  }
+
+  @Test
+  @SuppressWarnings("unchecked")
+  public void testCompositePk_DeltaImport_nodelta() throws Exception {
+    add1document();
+    MockDataSource.clearCache();
+
+    MockDataSource.setIterator(DELTA_QUERY,
+        Collections.EMPTY_LIST.iterator());
+
+    runDeltaImport(dataConfig_delta2);
+
+    assertQ(req("*:* OR testCompositePk_DeltaImport_nodelta"), "//*[@numFound='1']");
+    assertQ(req("solr_id:prefix-1 OR testCompositePk_DeltaImport_nodelta"), "//*[@numFound='1']");
+    assertQ(req("desc:hello OR testCompositePk_DeltaImport_nodelta"), "//*[@numFound='1']");
+  }
+
+  @Test
+  @SuppressWarnings("unchecked")
+  public void testCompositePk_DeltaImport_add_delete() throws Exception {
+    add1document();
+    MockDataSource.clearCache();
+
+    List deltaRow = new ArrayList();
+    deltaRow.add(createMap("id", "2"));
+    MockDataSource.setIterator(DELTA_QUERY,
+        deltaRow.iterator());
+
+    List deletedRow = new ArrayList();
+    deletedRow.add(createMap("id", "1"));
+    MockDataSource.setIterator(DELETED_PK_QUERY,
+        deletedRow.iterator());
+
+    List parentRow = new ArrayList();
+    parentRow.add(createMap("id", "2"));
+    MockDataSource.setIterator("select * from x where id='2'", parentRow
+        .iterator());
+
+    List childRow = new ArrayList();
+    childRow.add(createMap("desc", "goodbye"));
+    MockDataSource.setIterator("select * from y where y.A='2'", childRow
+        .iterator());
+
+    runDeltaImport(dataConfig_delta2);
+
+    assertQ(req("*:* OR XtestCompositePk_DeltaImport_add_delete"), "//*[@numFound='1']");
+    assertQ(req("solr_id:prefix-2"), "//*[@numFound='1']");
+    assertQ(req("desc:hello"), "//*[@numFound='0']");
+    assertQ(req("desc:goodbye"), "//*[@numFound='1']");
+  }
+}
diff --git a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestSqlEntityProcessorDelta3.java b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestSqlEntityProcessorDelta3.java
new file mode 100644
index 0000000..cf5cc28
--- /dev/null
+++ b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestSqlEntityProcessorDelta3.java
@@ -0,0 +1,236 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.handler.dataimport;
+
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Ignore;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+@Ignore("FIXME: I fail so often it makes me ill!")
+public class TestSqlEntityProcessorDelta3 extends AbstractDataImportHandlerTestCase {
+  private static final String P_FULLIMPORT_QUERY = "select * from parent";
+  private static final String P_DELTA_QUERY = "select parent_id from parent where last_modified > NOW";
+  private static final String P_DELTAIMPORT_QUERY = "select * from parent where last_modified > NOW AND parent_id=${dih.delta.parent_id}";
+
+  private static final String C_FULLIMPORT_QUERY = "select * from child";
+  private static final String C_DELETED_PK_QUERY = "select id from child where last_modified > NOW AND deleted='true'";
+  private static final String C_DELTA_QUERY = "select id from child where last_modified > NOW";
+  private static final String C_PARENTDELTA_QUERY = "select parent_id from child where id=${child.id}";
+  private static final String C_DELTAIMPORT_QUERY = "select * from child where last_modified > NOW AND parent_id=${dih.delta.parent_id}";
+  
+  private static final String dataConfig_delta =
+    "<dataConfig>" +
+    "  <dataSource  type=\"MockDataSource\"/>\n" +
+    "  <document>" +
+    "    <entity name=\"parent\" pk=\"parent_id\" rootEntity=\"false\"" +
+    "            query=\"" + P_FULLIMPORT_QUERY + "\"" +
+    "            deltaQuery=\"" + P_DELTA_QUERY + "\"" +
+    "            deltaImportQuery=\"" + P_DELTAIMPORT_QUERY + "\">" +
+    "      <field column=\"desc\" name=\"desc\"/>" +
+    "      <entity name=\"child\" pk=\"id\" rootEntity=\"true\"" +
+    "              query=\"" + C_FULLIMPORT_QUERY + "\"" +
+    "              deletedPkQuery=\"" + C_DELETED_PK_QUERY + "\"" +
+    "              deltaQuery=\"" + C_DELTA_QUERY + "\"" +
+    "              parentDeltaQuery=\"" + C_PARENTDELTA_QUERY + "\"" +
+    "              deltaImportQuery=\"" + C_DELTAIMPORT_QUERY + "\">" +
+    "        <field column=\"id\" name=\"id\" />" +
+    "      </entity>" +
+    "    </entity>" +
+    "  </document>" +
+    "</dataConfig>\n";
+
+  @BeforeClass
+  public static void beforeClass() throws Exception {
+    initCore("dataimport-solrconfig.xml", "dataimport-schema.xml");
+  }
+
+  @Before @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    clearIndex();
+    assertU(commit());
+  }
+
+  @SuppressWarnings("unchecked")
+  private void add1document() throws Exception {
+    List parentRow = new ArrayList();
+    parentRow.add(createMap("parent_id", "1", "desc", "d1"));
+    MockDataSource.setIterator(P_FULLIMPORT_QUERY, parentRow.iterator());
+
+    List childRow = new ArrayList();
+    childRow.add(createMap("id", "2"));
+    MockDataSource.setIterator(C_FULLIMPORT_QUERY, childRow.iterator());
+
+    runFullImport(dataConfig_delta);
+
+    assertQ(req("*:* OR add1document"), "//*[@numFound='1']");
+    assertQ(req("id:1"), "//*[@numFound='0']");
+    assertQ(req("id:2"), "//*[@numFound='1']");
+    assertQ(req("desc:d1"), "//*[@numFound='1']");
+  }
+
+  @Test
+  @SuppressWarnings("unchecked")
+  public void testCompositePk_FullImport() throws Exception {
+    add1document();
+  }
+  
+  // WORKS
+
+  @Test
+  @SuppressWarnings("unchecked")
+  public void testCompositePk_DeltaImport_delete() throws Exception {
+    add1document();
+    List deletedRow = new ArrayList();
+    deletedRow.add(createMap("id", "2"));
+    MockDataSource.setIterator(C_DELETED_PK_QUERY, deletedRow.iterator());
+    MockDataSource.setIterator(C_DELTA_QUERY, Collections.EMPTY_LIST.iterator());
+
+    List deletedParentRow = new ArrayList();
+    deletedParentRow.add(createMap("parent_id", "1"));
+    MockDataSource.setIterator("select parent_id from child where id=2", deletedParentRow.iterator());
+
+    runDeltaImport(dataConfig_delta);
+    assertQ(req("*:* OR testCompositePk_DeltaImport_delete"), "//*[@numFound='0']");
+  }
+
+  @Test
+  @SuppressWarnings("unchecked")
+  public void testCompositePk_DeltaImport_empty() throws Exception {
+    List childDeltaRow = new ArrayList();
+    childDeltaRow.add(createMap("id", "2"));
+    MockDataSource.setIterator(C_DELTA_QUERY, childDeltaRow.iterator());
+    MockDataSource.setIterator(C_DELETED_PK_QUERY, Collections.EMPTY_LIST.iterator());
+    
+    List childParentDeltaRow = new ArrayList();
+    childParentDeltaRow.add(createMap("parent_id", "1"));
+    MockDataSource.setIterator("select parent_id from child where id=2", childParentDeltaRow.iterator());
+    
+    MockDataSource.setIterator(P_DELTA_QUERY, Collections.EMPTY_LIST.iterator());
+
+    List parentDeltaImportRow = new ArrayList();
+    parentDeltaImportRow.add(createMap("parent_id", "1", "desc", "d1"));
+    MockDataSource.setIterator("select * from parent where last_modified > NOW AND parent_id=1",
+        parentDeltaImportRow.iterator());
+
+    List childDeltaImportRow = new ArrayList();
+    childDeltaImportRow.add(createMap("id", "2"));
+    MockDataSource.setIterator("select * from child where last_modified > NOW AND parent_id=1",
+        childDeltaImportRow.iterator());
+    
+    runDeltaImport(dataConfig_delta);
+
+    assertQ(req("*:* OR testCompositePk_DeltaImport_empty"), "//*[@numFound='1']");
+    assertQ(req("id:2"), "//*[@numFound='1']");
+    assertQ(req("desc:d1"), "//*[@numFound='1']");
+  }
+
+  @Test
+  @SuppressWarnings("unchecked")
+  public void testCompositePk_DeltaImport_replace_nodelete() throws Exception {
+    add1document();
+    MockDataSource.clearCache();
+
+    List deltaRow = new ArrayList();
+    deltaRow.add(createMap("parent_id", "1"));
+    MockDataSource.setIterator(P_DELTA_QUERY,
+        deltaRow.iterator());
+
+    List parentRow = new ArrayList();
+    parentRow.add(createMap("parent_id", "1", "desc", "d2"));
+    MockDataSource.setIterator("select * from parent where last_modified > NOW AND parent_id=1",
+        parentRow.iterator());
+
+    List childRow = new ArrayList();
+    childRow.add(createMap("id", "2"));
+    MockDataSource.setIterator("select * from child where last_modified > NOW AND parent_id=1",
+        childRow.iterator());
+
+    MockDataSource.setIterator(C_DELETED_PK_QUERY, Collections
+        .EMPTY_LIST.iterator());
+
+    runDeltaImport(dataConfig_delta);
+
+    assertQ(req("*:* OR XtestCompositePk_DeltaImport_replace_nodelete"), "//*[@numFound='1']");
+    assertQ(req("id:2"), "//*[@numFound='1']");
+    assertQ(req("desc:s1 OR XtestCompositePk_DeltaImport_replace_nodelete"), "//*[@numFound='0']");
+    assertQ(req("desc:d2"), "//*[@numFound='1']");
+  }
+
+  @Test
+  @SuppressWarnings("unchecked")
+  public void testCompositePk_DeltaImport_add() throws Exception {
+    add1document();
+    MockDataSource.clearCache();
+
+    List parentDeltaRow = new ArrayList();
+    parentDeltaRow.add(createMap("parent_id", "1"));
+    MockDataSource.setIterator(P_DELTA_QUERY,
+        parentDeltaRow.iterator());
+
+    List parentRow = new ArrayList();
+    parentRow.add(createMap("parent_id", "1", "desc", "d1"));
+    MockDataSource.setIterator("select * from parent where last_modified > NOW AND parent_id=1",
+        parentRow.iterator());
+
+    List childDeltaRow = new ArrayList();
+    childDeltaRow.add(createMap("id", "3"));
+    MockDataSource.setIterator(C_DELTA_QUERY,
+        childDeltaRow.iterator());
+
+    List childParentDeltaRow = new ArrayList();
+    childParentDeltaRow.add(createMap("parent_id", "1"));
+    MockDataSource.setIterator("select parent_id from child where id='3'",
+        childParentDeltaRow.iterator());
+
+    List childRow = new ArrayList();
+    childRow.add(createMap("id", "3"));
+    MockDataSource.setIterator("select * from child where last_modified > NOW AND parent_id=1",
+        childRow.iterator());
+
+    runDeltaImport(dataConfig_delta);
+
+    assertQ(req("*:* OR testCompositePk_DeltaImport_add"), "//*[@numFound='2']");
+    assertQ(req("id:2"), "//*[@numFound='1']");
+    assertQ(req("id:3"), "//*[@numFound='1']");
+    assertQ(req("desc:d1"), "//*[@numFound='2']");
+  }
+
+  @Test
+  @SuppressWarnings("unchecked")
+  public void testCompositePk_DeltaImport_nodelta() throws Exception {
+    add1document();
+    MockDataSource.clearCache();
+
+    MockDataSource.setIterator(P_DELTA_QUERY,
+        Collections.EMPTY_LIST.iterator());
+
+    MockDataSource.setIterator(C_DELTA_QUERY,
+        Collections.EMPTY_LIST.iterator());
+
+    runDeltaImport(dataConfig_delta);
+
+    assertQ(req("*:* OR testCompositePk_DeltaImport_nodelta"), "//*[@numFound='1']");
+    assertQ(req("id:2 OR testCompositePk_DeltaImport_nodelta"), "//*[@numFound='1']");
+    assertQ(req("desc:d1 OR testCompositePk_DeltaImport_nodelta"), "//*[@numFound='1']");
+  }
+}
diff --git a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestSqlEntityProcessorDeltaPrefixedPk.java b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestSqlEntityProcessorDeltaPrefixedPk.java
new file mode 100644
index 0000000..0f236bf
--- /dev/null
+++ b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestSqlEntityProcessorDeltaPrefixedPk.java
@@ -0,0 +1,147 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.handler.dataimport;
+
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Ignore;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.logging.*;
+
+/**
+ * <p>
+ * Test for SqlEntityProcessorDelta verifying fix for SOLR-1191
+ * </p>
+ * 
+ *
+ *
+ * @since solr 3.1
+ */
+@Ignore("FIXME: I fail so often it makes me ill!")
+public class TestSqlEntityProcessorDeltaPrefixedPk extends AbstractDataImportHandlerTestCase {
+  private static final String FULLIMPORT_QUERY = "select * from x";
+
+  private static final String DELTA_QUERY = "select id from x where last_modified > NOW";
+
+  private static final String DELETED_PK_QUERY = "select id from x where last_modified > NOW AND deleted='true'";
+
+  private static final String dataConfig_delta =
+    "<dataConfig>" +
+    "  <dataSource  type=\"MockDataSource\"/>\n" +
+    "  <document>\n" +
+    "    <entity name=\"x\" transformer=\"TemplateTransformer\" pk=\"x.id\"" +
+    "            query=\"" + FULLIMPORT_QUERY + "\"" +
+    "            deletedPkQuery=\"" + DELETED_PK_QUERY + "\"" +
+    "            deltaImportQuery=\"select * from x where id='${dih.delta.id}'\"" +
+    "            deltaQuery=\"" + DELTA_QUERY + "\">\n" +
+    "      <field column=\"id\" name=\"id\"/>\n" +
+    "      <field column=\"desc\" name=\"desc\"/>\n" +
+    "    </entity>\n" +
+    "  </document>\n" +
+    "</dataConfig>\n";
+  
+  private static final List EMPTY_LIST = Collections.EMPTY_LIST;
+
+  @BeforeClass
+  public static void beforeClass() throws Exception {
+    initCore("dataimport-solrconfig.xml", "dataimport-schema.xml");
+  }
+
+  @Before @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    clearIndex();
+    assertU(commit());
+    //Logger.getLogger("").setLevel(Level.ALL);
+  }
+
+  @SuppressWarnings("unchecked")
+  private void add1document() throws Exception {
+    List row = new ArrayList();
+    row.add(createMap("id", "1", "desc", "bar"));
+    MockDataSource.setIterator(FULLIMPORT_QUERY, row.iterator());
+
+    runFullImport(dataConfig_delta);
+
+    assertQ(req("*:* OR add1document"), "//*[@numFound='1']");
+    assertQ(req("id:1"), "//*[@numFound='1']");
+    assertQ(req("desc:bar"), "//*[@numFound='1']");
+  }
+
+  @Test
+  @SuppressWarnings("unchecked")
+  public void testDeltaImport_deleteResolvesUnprefixedPk() throws Exception {
+    add1document();
+    MockDataSource.clearCache();
+    List deletedRows = new ArrayList();
+    deletedRows.add(createMap("id", "1"));
+    MockDataSource.setIterator(DELETED_PK_QUERY, deletedRows.iterator());
+    MockDataSource.setIterator(DELTA_QUERY, EMPTY_LIST.iterator());
+    runDeltaImport(dataConfig_delta);
+
+    assertQ(req("*:* OR testDeltaImport_deleteResolvesUnprefixedPk"), "//*[@numFound='0']");
+  }
+
+  @Test
+  @SuppressWarnings("unchecked")
+  public void testDeltaImport_replace_resolvesUnprefixedPk() throws Exception {
+    add1document();
+    MockDataSource.clearCache();
+    List deltaRows = new ArrayList();
+    deltaRows.add(createMap("id", "1"));
+    MockDataSource.setIterator(DELTA_QUERY, deltaRows.iterator());
+    MockDataSource.setIterator(DELETED_PK_QUERY, EMPTY_LIST.iterator());
+    List rows = new ArrayList();
+    rows.add(createMap("id", "1", "desc", "baz"));
+    MockDataSource.setIterator("select * from x where id='1'", rows.iterator());
+
+    runDeltaImport(dataConfig_delta);
+
+    assertQ(req("*:* OR testDeltaImport_replace_resolvesUnprefixedPk"), "//*[@numFound='1']");
+    assertQ(req("id:1"), "//*[@numFound='1']");
+    assertQ(req("desc:bar"), "//*[@numFound='0']");
+    assertQ(req("desc:baz"), "//*[@numFound='1']");
+  }
+
+  @Test
+  @SuppressWarnings("unchecked")
+  public void testDeltaImport_addResolvesUnprefixedPk() throws Exception {
+    add1document();
+    MockDataSource.clearCache();
+
+    List deltaRows = new ArrayList();
+    deltaRows.add(createMap("id", "2"));
+    MockDataSource.setIterator(DELTA_QUERY, deltaRows.iterator());
+
+    List rows = new ArrayList();
+    rows.add(createMap("id", "2", "desc", "xyzzy"));
+    MockDataSource.setIterator("select * from x where id='2'", rows.iterator());
+
+    runDeltaImport(dataConfig_delta);
+
+    assertQ(req("*:* OR testDeltaImport_addResolvesUnprefixedPk"), "//*[@numFound='2']");
+    assertQ(req("id:1"), "//*[@numFound='1']");
+    assertQ(req("id:2"), "//*[@numFound='1']");
+    assertQ(req("desc:bar"), "//*[@numFound='1']");
+    assertQ(req("desc:xyzzy"), "//*[@numFound='1']");
+  }
+
+}
diff --git a/solr/core/src/java/org/apache/solr/handler/component/TermVectorComponent.java b/solr/core/src/java/org/apache/solr/handler/component/TermVectorComponent.java
index 3378847..aa01c57 100644
--- a/solr/core/src/java/org/apache/solr/handler/component/TermVectorComponent.java
+++ b/solr/core/src/java/org/apache/solr/handler/component/TermVectorComponent.java
@@ -13,7 +13,7 @@
 import java.util.Map;
 import java.util.Map.Entry;
 
-import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.index.DocsEnum;
 import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.Fields;
 import org.apache.lucene.index.IndexReader;
@@ -335,7 +335,7 @@
     docNL.add(field, fieldNL);
 
     BytesRef text;
-    DocsAndPositionsEnum dpEnum = null;
+    DocsEnum dpEnum = null;
     while((text = termsEnum.next()) != null) {
       String term = text.utf8ToString();
       NamedList<Object> termInfo = new NamedList<Object>();
diff --git a/solr/core/src/java/org/apache/solr/schema/LatLonType.java b/solr/core/src/java/org/apache/solr/schema/LatLonType.java
index 70235ac..ea7515a 100644
--- a/solr/core/src/java/org/apache/solr/schema/LatLonType.java
+++ b/solr/core/src/java/org/apache/solr/schema/LatLonType.java
@@ -29,6 +29,8 @@
 import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.queries.function.valuesource.VectorValueSource;
+import org.apache.lucene.search.intervals.IntervalIterator;
+
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.ComplexExplanation;
@@ -358,13 +360,13 @@
 
     @Override
     public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder,
-        boolean topScorer, Bits acceptDocs) throws IOException {
+        boolean topScorer, PostingFeatures flags, Bits acceptDocs) throws IOException {
       return new SpatialScorer(context, acceptDocs, this, queryWeight);
     }
 
     @Override
     public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
-      return ((SpatialScorer)scorer(context, true, true, context.reader().getLiveDocs())).explain(doc);
+      return ((SpatialScorer)scorer(context, true, true, PostingFeatures.DOCS_AND_FREQS, context.reader().getLiveDocs())).explain(doc);
     }
   }
 
@@ -523,6 +525,11 @@
       result.addDetail(new Explanation(weight.queryNorm,"queryNorm"));
       return result;
     }
+
+    @Override
+    public IntervalIterator intervals(boolean collectIntervals) throws IOException {
+      throw new UnsupportedOperationException();
+    }
   }
 
   @Override
diff --git a/solr/core/src/java/org/apache/solr/search/JoinQParserPlugin.java b/solr/core/src/java/org/apache/solr/search/JoinQParserPlugin.java
index 86756cc..8184ce3 100644
--- a/solr/core/src/java/org/apache/solr/search/JoinQParserPlugin.java
+++ b/solr/core/src/java/org/apache/solr/search/JoinQParserPlugin.java
@@ -18,6 +18,7 @@
 
 import org.apache.lucene.index.*;
 import org.apache.lucene.search.*;
+import org.apache.lucene.search.intervals.IntervalIterator;
 import org.apache.lucene.search.similarities.Similarity;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
@@ -219,7 +220,7 @@
 
     @Override
     public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder,
-        boolean topScorer, Bits acceptDocs) throws IOException {
+        boolean topScorer, PostingFeatures flags, Bits acceptDocs) throws IOException {
       if (filter == null) {
         boolean debug = rb != null && rb.isDebug();
         long start = debug ? System.currentTimeMillis() : 0;
@@ -487,7 +488,7 @@
 
     @Override
     public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
-      Scorer scorer = scorer(context, true, false, context.reader().getLiveDocs());
+      Scorer scorer = scorer(context, true, false, PostingFeatures.DOCS_AND_FREQS, context.reader().getLiveDocs());
       boolean exists = scorer.advance(doc) == doc;
 
       ComplexExplanation result = new ComplexExplanation();
@@ -547,6 +548,15 @@
     }
 
     @Override
+    public IntervalIterator intervals(boolean collectIntervals) throws IOException {
+      if (iter instanceof Scorer) {
+        return ((Scorer) iter).intervals(collectIntervals);
+      }
+      throw new UnsupportedOperationException("Positions are only supported for Scorers");
+
+    }
+
+    @Override
     public long cost() {
       return iter.cost();
     }
diff --git a/solr/core/src/java/org/apache/solr/search/SolrConstantScoreQuery.java b/solr/core/src/java/org/apache/solr/search/SolrConstantScoreQuery.java
index 2b6268e..52b954b 100644
--- a/solr/core/src/java/org/apache/solr/search/SolrConstantScoreQuery.java
+++ b/solr/core/src/java/org/apache/solr/search/SolrConstantScoreQuery.java
@@ -2,6 +2,7 @@
 
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.search.*;
+import org.apache.lucene.search.intervals.IntervalIterator;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.AtomicReaderContext;
@@ -120,7 +121,7 @@
 
     @Override
     public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder,
-        boolean topScorer, Bits acceptDocs) throws IOException {
+        boolean topScorer, PostingFeatures flags, Bits acceptDocs) throws IOException {
       return new ConstantScorer(context, this, queryWeight, acceptDocs);
     }
 
@@ -198,6 +199,14 @@
     }
 
     @Override
+    public IntervalIterator intervals(boolean collectIntervals) throws IOException {
+      if (docIdSetIterator instanceof Scorer) {
+        return ((Scorer) docIdSetIterator).intervals(collectIntervals);
+      }
+      throw new UnsupportedOperationException("Positions are only supported for Scorers");
+    }
+
+    @Override
     public long cost() {
       return docIdSetIterator.cost();
     }
diff --git a/solr/core/src/java/org/apache/solr/search/SolrIndexSearcher.java b/solr/core/src/java/org/apache/solr/search/SolrIndexSearcher.java
index bad1d95..5622d36 100644
--- a/solr/core/src/java/org/apache/solr/search/SolrIndexSearcher.java
+++ b/solr/core/src/java/org/apache/solr/search/SolrIndexSearcher.java
@@ -82,6 +82,7 @@
 import org.apache.lucene.search.TopScoreDocCollector;
 import org.apache.lucene.search.TotalHitCountCollector;
 import org.apache.lucene.search.Weight;
+import org.apache.lucene.search.Weight.PostingFeatures;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
@@ -2347,7 +2348,7 @@
         iterators.add(iter);
       }
       for (Weight w : weights) {
-        Scorer scorer = w.scorer(context, true, false, context.reader().getLiveDocs());
+        Scorer scorer = w.scorer(context, true, false, PostingFeatures.DOCS_AND_FREQS, context.reader().getLiveDocs());
         if (scorer == null) return null;
         iterators.add(scorer);
       }
diff --git a/solr/core/src/java/org/apache/solr/search/join/IgnoreAcceptDocsQuery.java b/solr/core/src/java/org/apache/solr/search/join/IgnoreAcceptDocsQuery.java
index 7e057fe..b902970 100644
--- a/solr/core/src/java/org/apache/solr/search/join/IgnoreAcceptDocsQuery.java
+++ b/solr/core/src/java/org/apache/solr/search/join/IgnoreAcceptDocsQuery.java
@@ -86,8 +86,8 @@
     }
 
     @Override
-    public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder, boolean topScorer, Bits acceptDocs) throws IOException {
-      return w.scorer(context, scoreDocsInOrder, topScorer, null);
+    public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder, boolean topScorer, PostingFeatures flags, Bits acceptDocs) throws IOException {
+      return w.scorer(context, scoreDocsInOrder, topScorer, flags, null);
     }
   }
 
diff --git a/solr/licenses/jetty-continuation-8.1.2.v20120308.jar.sha1 b/solr/licenses/jetty-continuation-8.1.2.v20120308.jar.sha1
new file mode 100644
index 0000000..1276297
--- /dev/null
+++ b/solr/licenses/jetty-continuation-8.1.2.v20120308.jar.sha1
@@ -0,0 +1 @@
+545d335d2f6d5e195939528f6a37f23abad4f58f
diff --git a/solr/licenses/jetty-deploy-8.1.2.v20120308.jar.sha1 b/solr/licenses/jetty-deploy-8.1.2.v20120308.jar.sha1
new file mode 100644
index 0000000..3414c0f
--- /dev/null
+++ b/solr/licenses/jetty-deploy-8.1.2.v20120308.jar.sha1
@@ -0,0 +1 @@
+79b1ef70ba4bb4c05d35516f795ff306a96bb25a
diff --git a/solr/licenses/jetty-http-8.1.2.v20120308.jar.sha1 b/solr/licenses/jetty-http-8.1.2.v20120308.jar.sha1
new file mode 100644
index 0000000..aafa446
--- /dev/null
+++ b/solr/licenses/jetty-http-8.1.2.v20120308.jar.sha1
@@ -0,0 +1 @@
+650858c9c7344da2455b60069224ee148a80bdc5
diff --git a/solr/licenses/jetty-io-8.1.2.v20120308.jar.sha1 b/solr/licenses/jetty-io-8.1.2.v20120308.jar.sha1
new file mode 100644
index 0000000..13f2aae
--- /dev/null
+++ b/solr/licenses/jetty-io-8.1.2.v20120308.jar.sha1
@@ -0,0 +1 @@
+2da8e10c38250f713764a31bc4b7dbc58983de0e
diff --git a/solr/licenses/jetty-jmx-8.1.2.v20120308.jar.sha1 b/solr/licenses/jetty-jmx-8.1.2.v20120308.jar.sha1
new file mode 100644
index 0000000..6368fa2
--- /dev/null
+++ b/solr/licenses/jetty-jmx-8.1.2.v20120308.jar.sha1
@@ -0,0 +1 @@
+049299fdc468aec112070369513f363447c12e76
diff --git a/solr/licenses/jetty-security-8.1.2.v20120308.jar.sha1 b/solr/licenses/jetty-security-8.1.2.v20120308.jar.sha1
new file mode 100644
index 0000000..4ce07f9
--- /dev/null
+++ b/solr/licenses/jetty-security-8.1.2.v20120308.jar.sha1
@@ -0,0 +1 @@
+3a559bfb2788e71b4469631497c58c93ba273259
diff --git a/solr/licenses/jetty-server-8.1.2.v20120308.jar.sha1 b/solr/licenses/jetty-server-8.1.2.v20120308.jar.sha1
new file mode 100644
index 0000000..1dc00a9
--- /dev/null
+++ b/solr/licenses/jetty-server-8.1.2.v20120308.jar.sha1
@@ -0,0 +1 @@
+5d56afa0f80e90aa40c5af42b4f7b82992794f1f
diff --git a/solr/licenses/jetty-servlet-8.1.2.v20120308.jar.sha1 b/solr/licenses/jetty-servlet-8.1.2.v20120308.jar.sha1
new file mode 100644
index 0000000..9667c1f
--- /dev/null
+++ b/solr/licenses/jetty-servlet-8.1.2.v20120308.jar.sha1
@@ -0,0 +1 @@
+d855e7a18f0381b6128ccf4563355e969f826433
diff --git a/solr/licenses/jetty-util-8.1.2.v20120308.jar.sha1 b/solr/licenses/jetty-util-8.1.2.v20120308.jar.sha1
new file mode 100644
index 0000000..f7322cb
--- /dev/null
+++ b/solr/licenses/jetty-util-8.1.2.v20120308.jar.sha1
@@ -0,0 +1 @@
+d14aef3cae042cd9716fb109d1205bfd84248956
diff --git a/solr/licenses/jetty-webapp-8.1.2.v20120308.jar.sha1 b/solr/licenses/jetty-webapp-8.1.2.v20120308.jar.sha1
new file mode 100644
index 0000000..a1709c7
--- /dev/null
+++ b/solr/licenses/jetty-webapp-8.1.2.v20120308.jar.sha1
@@ -0,0 +1 @@
+436ed4d774f26ac348e4a84938af19130b8f9773
diff --git a/solr/licenses/jetty-xml-8.1.2.v20120308.jar.sha1 b/solr/licenses/jetty-xml-8.1.2.v20120308.jar.sha1
new file mode 100644
index 0000000..3f98b4e
--- /dev/null
+++ b/solr/licenses/jetty-xml-8.1.2.v20120308.jar.sha1
@@ -0,0 +1 @@
+ade750a7b75b6ce58c6e50347b2c1e6dafc1eb4b
diff --git a/solr/licenses/zookeeper-3.3.6.jar.sha1 b/solr/licenses/zookeeper-3.3.6.jar.sha1
new file mode 100644
index 0000000..8bd4cd0
--- /dev/null
+++ b/solr/licenses/zookeeper-3.3.6.jar.sha1
@@ -0,0 +1 @@
+36825ff1595144d42d2f3a51f810eaefdcf8cb79